linux/arch/x86/vdso/vma.c
/*
 * Set up the VMAs to tell the VM about the vDSO.
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vsyscall.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/page.h>

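/* Toggled by the "vdso=" boot parameter; see vdso_setup() below. */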
unsigned int __read_mostly vdso_enabled = 1;

extern char vdso_start[], vdso_end[];
extern unsigned short vdso_sync_cpuid;

extern struct page *vdso_pages[];
static unsigned vdso_size;

#ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[];
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size;

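/*
 * Walk the x32 vDSO image's section headers, find .altinstructions and
 * apply the alternative-instruction patches to the image before it is
 * mapped into any process.
 */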
static void __init patch_vdsox32(void *vdso, size_t len)
{
	Elf32_Ehdr *hdr = vdso;
	Elf32_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf32_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf32_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warn("patch_vdsox32: .altinstructions not found\n");
	return;  /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}
#endif

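/* Same as patch_vdsox32(), but for the Elf64 image of the 64-bit vDSO. */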
static void __init patch_vdso64(void *vdso, size_t len)
{
	Elf64_Ehdr *hdr = vdso;
	Elf64_Shdr *sechdrs, *alt_sec = NULL;
	char *secstrings;
	void *alt_data;
	int i;

	BUG_ON(len < sizeof(Elf64_Ehdr));
	BUG_ON(memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0);

	sechdrs = (void *)hdr + hdr->e_shoff;
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (i = 1; i < hdr->e_shnum; i++) {
		Elf64_Shdr *shdr = &sechdrs[i];
		if (!strcmp(secstrings + shdr->sh_name, ".altinstructions")) {
			alt_sec = shdr;
			goto found;
		}
	}

	/* If we get here, it's probably a bug. */
	pr_warn("patch_vdso64: .altinstructions not found\n");
	return;  /* nothing to patch */

found:
	alt_data = (void *)hdr + alt_sec->sh_offset;
	apply_alternatives(alt_data, alt_data + alt_sec->sh_size);
}

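/*
 * Boot-time setup: patch alternatives into the vDSO images and fill the
 * page arrays that setup_additional_pages() later maps into each process.
 */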
static int __init init_vdso(void)
{
	int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE;
	int i;

	patch_vdso64(vdso_start, vdso_end - vdso_start);

	vdso_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdso_pages[i] = virt_to_page(vdso_start + i * PAGE_SIZE);

#ifdef CONFIG_X86_X32_ABI
	patch_vdsox32(vdsox32_start, vdsox32_end - vdsox32_start);
	npages = (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE;
	vdsox32_size = npages << PAGE_SHIFT;
	for (i = 0; i < npages; i++)
		vdsox32_pages[i] = virt_to_page(vdsox32_start + i * PAGE_SIZE);
#endif

	return 0;
}
subsys_initcall(init_vdso);

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
}

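/*
 * Worked illustration with hypothetical numbers (4K pages, 2MB PMDs):
 * start = 0x7fff12345678 page-aligns to 0x7fff12346000; with len = 0x2000
 * the rounded-up end becomes 0x7fff12400000 - 0x2000 = 0x7fff123fe000,
 * leaving 185 page-aligned slots for get_random_int() to pick from.
 */
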
/*
 * Set up a VMA at program startup for the vDSO pages.
 * Not called for compat tasks.
 */
static int setup_additional_pages(struct linux_binprm *bprm,
				  int uses_interp,
				  struct page **pages,
				  unsigned size)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!vdso_enabled)
		return 0;

	down_write(&mm->mmap_sem);
	addr = vdso_addr(mm->start_stack, size);
	addr = get_unmapped_area(NULL, addr, size, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	mm->context.vdso = (void *)addr;

	ret = install_special_mapping(mm, addr, size,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      pages);
	if (ret) {
		mm->context.vdso = NULL;
		goto up_fail;
	}

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

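/*
 * Userspace can locate the resulting mapping through the AT_SYSINFO_EHDR
 * auxiliary-vector entry; an illustrative lookup (glibc 2.16+):
 *
 *	#include <sys/auxv.h>
 *	void *vdso_base = (void *)getauxval(AT_SYSINFO_EHDR);
 */
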
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdso_pages,
				      vdso_size);
}

#ifdef CONFIG_X86_X32_ABI
int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return setup_additional_pages(bprm, uses_interp, vdsox32_pages,
				      vdsox32_size);
}
#endif

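/* Parse the "vdso=" boot option: 0 disables the mapping, nonzero (default 1) enables it. */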
static __init int vdso_setup(char *s)
{
	vdso_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);