/* linux/arch/x86/vdso/vma.c */
   1/*
   2 * Set up the VMAs to tell the VM about the vDSO.
   3 * Copyright 2007 Andi Kleen, SUSE Labs.
   4 * Subject to the GPL, v.2
   5 */
   6#include <linux/mm.h>
   7#include <linux/err.h>
   8#include <linux/sched.h>
   9#include <linux/slab.h>
  10#include <linux/init.h>
  11#include <linux/random.h>
  12#include <linux/elf.h>
  13#include <asm/vsyscall.h>
  14#include <asm/vgtod.h>
  15#include <asm/proto.h>
  16#include <asm/vdso.h>
  17#include <asm/page.h>
  18
  19unsigned int __read_mostly vdso_enabled = 1;
  20
  21extern char vdso_start[], vdso_end[];
  22extern unsigned short vdso_sync_cpuid;
  23
  24extern struct page *vdso_pages[];
  25static unsigned vdso_size;
  26
  27#ifdef CONFIG_X86_X32_ABI
  28extern char vdsox32_start[], vdsox32_end[];
  29extern struct page *vdsox32_pages[];
  30static unsigned vdsox32_size;
  31
  32static void __init patch_vdsox32(void *vdso, size_t len)
  33{
  34        Elf32_Ehdr *hdr = vdso;
  35        Elf32_Shdr *sechdrs, *alt_sec = 0;
  36        char *secstrings;
  37        void *alt_data;
  38        int i;
  39
  40        BUG_ON(len < sizeof(Elf32_Ehdr));
  41        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
  42
  43        sechdrs = (void *)hdr + hdr->e_shoff;
  44        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  45
  46        for (i = 1; i < hdr->e_shnum; i++) {
  47                Elf32_Shdr *shdr = &sechdrs[i];
  48                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
  49                        alt_sec = shdr;
  50                        goto found;
  51                }
  52        }
  53
  54        /* If we get here, it's probably a bug. */
  55        pr_warning("patch_vdsox32: .altinstructions not found\n");
  56        return;  /* nothing to patch */
  57
  58found:
  59        alt_data = (void *)hdr + alt_sec->sh_offset;
  60        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
  61}
  62#endif
  63
  64static void __init patch_vdso64(void *vdso, size_t len)
  65{
  66        Elf64_Ehdr *hdr = vdso;
  67        Elf64_Shdr *sechdrs, *alt_sec = 0;
  68        char *secstrings;
  69        void *alt_data;
  70        int i;
  71
  72        BUG_ON(len < sizeof(Elf64_Ehdr));
  73        BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);
  74
  75        sechdrs = (void *)hdr + hdr->e_shoff;
  76        secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
  77
  78        for (i = 1; i < hdr->e_shnum; i++) {
  79                Elf64_Shdr *shdr = &sechdrs[i];
  80                if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
  81                        alt_sec = shdr;
  82                        goto found;
  83                }
  84        }
  85
  86        /* If we get here, it's probably a bug. */
  87        pr_warning("patch_vdso64: .altinstructions not found\n");
  88        return;  /* nothing to patch */
  89
  90found:
  91        alt_data = (void *)hdr + alt_sec->sh_offset;
  92        apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
  93}
  94
  95static int __init init_vdso(void)
  96{
  97        int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
  98        int i;
  99
 100        patch_vdso64(vdso_start, vdso_end - vdso_start);
 101
 102        vdso_size = npages << PAGE_SHIFT;
 103        for (i = 0; i < npages; i++)
 104                vdso_pages[i] = virt_to_page(vdso_start + i*PAGE_SIZE);
 105
 106#ifdef CONFIG_X86_X32_ABI
 107        patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
 108        npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
 109        vdsox32_size = npages << PAGE_SHIFT;
 110        for (i = 0; i < npages; i++)
 111                vdsox32_pages[i] = virt_to_page(vdsox32_start + i*PAGE_SIZE);
 112#endif
 113
 114        return 0;
 115}
 116subsys_initcall(init_vdso);
 117
 118struct linux_binprm;
 119
 120/* Put the vdso above the (randomized) stack with another randomized offset.
 121   This way there is no hole in the middle of address space.
 122   To save memory make sure it is still in the same PTE as the stack top.
 123   This doesn't give that many random bits */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;
	/*
	 * Upper bound: the next PMD boundary above the stack top (so the
	 * vdso shares the stack's last-level page table), capped at the
	 * end of the user address space, minus the vdso's own length.
	 */
	end = (start + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;
	/* This loses some more bits than a modulo, but is cheaper */
	/*
	 * NOTE(review): only PTRS_PER_PTE slots of entropy, and every
	 * candidate >= end is clamped to exactly 'end', so the topmost
	 * address is over-represented (non-uniform randomization).
	 * Upstream later reworked this algorithm — confirm whether
	 * stronger ASLR is required here.
	 */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	addr = start + (offset << PAGE_SHIFT);
	if (addr >= end)
		addr = end;

	/*
	 * page-align it here so that get_unmapped_area doesn't
	 * align it wrongfully again to the next page. addr can come in 4K
	 * unaligned here as a result of stack start randomization.
	 */
	addr = PAGE_ALIGN(addr);
	addr = align_vdso_addr(addr);

	return addr;
}
 148
 149/* Setup a VMA at program startup for the vsyscall page.
 150   Not called for compat tasks */
 151static int setup_additional_pages(struct linux_binprm *bprm,
 152                                  int uses_interp,
 153                                  struct page **pages,
 154                                  unsigned size)
 155{
 156        struct mm_struct *mm = current->mm;
 157        unsigned long addr;
 158        int ret;
 159
 160        if (!vdso_enabled)
 161                return 0;
 162
 163        down_write(&mm->mmap_sem);
 164        addr = vdso_addr(mm->start_stack, size);
 165        addr = get_unmapped_area(NULL, addr, size, 0, 0);
 166        if (IS_ERR_VALUE(addr)) {
 167                ret = addr;
 168                goto up_fail;
 169        }
 170
 171        current->mm->context.vdso = (void *)addr;
 172
 173        ret = install_special_mapping(mm, addr, size,
 174                                      VM_READ|VM_EXEC|
 175                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
 176                                      pages);
 177        if (ret) {
 178                current->mm->context.vdso = NULL;
 179                goto up_fail;
 180        }
 181
 182up_fail:
 183        up_write(&mm->mmap_sem);
 184        return ret;
 185}
 186
 187int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 188{
 189        return setup_additional_pages(bprm, uses_interp, vdso_pages,
 190                                      vdso_size);
 191}
 192
 193#ifdef CONFIG_X86_X32_ABI
 194int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 195{
 196        return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
 197                                      vdsox32_size);
 198}
 199#endif
 200
 201static __init int vdso_setup(char *s)
 202{
 203        vdso_enabled = simple_strtoul(s, NULL, 0);
 204        return 0;
 205}
 206__setup("vdso=", vdso_setup);
 207