linux/arch/x86/entry/vdso/vma.c
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

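/*
 * Patch a vdso image with the alternative instructions selected for the
 * boot CPU, so user space always executes the chosen variants.  The image
 * size must be a whole number of pages because it is mapped (and faulted
 * in) page by page.
 */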
void __init init_vdso_image(const struct vdso_image *image)
{
	BUG_ON(image->size % PAGE_SIZE != 0);

	apply_alternatives((struct alt_instr *)(image->data + image->alt),
			   (struct alt_instr *)(image->data + image->alt +
						image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of the address space.
 * To save memory, make sure it is still covered by the same page-table
 * page (PTE page) as the stack top, which means this doesn't give that
 * many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	unsigned long addr, end;
	unsigned offset;

	/*
	 * Round up the start address.  It can start out unaligned as a result
	 * of stack start randomization.
	 */
	start = PAGE_ALIGN(start);

	/* Round the lowest possible end address up to a PMD boundary. */
	end = (start + len + PMD_SIZE - 1) & PMD_MASK;
	if (end >= TASK_SIZE_MAX)
		end = TASK_SIZE_MAX;
	end -= len;

	if (end > start) {
		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
		addr = start + (offset << PAGE_SHIFT);
	} else {
		addr = start;
	}

	/*
	 * Forcibly align the final address in case we have a hardware
	 * issue that requires alignment for performance reasons.
	 */
	addr = align_vdso_addr(addr);

	return addr;
#endif
}

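/*
 * The vdso text is not pre-populated at mmap time; each page is faulted
 * in on demand straight from the in-kernel image, so every process ends
 * up sharing the same physical pages.
 */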
static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;

	if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
		return VM_FAULT_SIGBUS;

	vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
	get_page(vmf->page);
	return 0;
}

static const struct vm_special_mapping text_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};

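/*
 * The vvar area sits below the vdso text (sym_vvar_start is negative), so
 * a fault offset into the [vvar] VMA is translated back into a
 * text-relative symbol offset here.  Only the shared vvar data page and,
 * when pvclock is in use, the pvclock page are ever inserted; any other
 * offset gets SIGBUS.
 */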
static int vvar_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	const struct vdso_image *image = vma->vm_mm->context.vdso_image;
	long sym_offset;
	int ret = -EFAULT;

	if (!image)
		return VM_FAULT_SIGBUS;

	sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
		image->sym_vvar_start;

	/*
	 * Sanity check: a symbol offset of zero means that the page
	 * does not exist for this vdso image, not that the page is at
	 * offset zero relative to the text mapping.  This should be
	 * impossible here, because sym_offset should only be zero for
	 * the page past the end of the vvar mapping.
	 */
	if (sym_offset == 0)
		return VM_FAULT_SIGBUS;

	if (sym_offset == image->sym_vvar_page) {
		ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address,
				    __pa_symbol(&__vvar_page) >> PAGE_SHIFT);
	} else if (sym_offset == image->sym_pvclock_page) {
		struct pvclock_vsyscall_time_info *pvti =
			pvclock_pvti_cpu0_va();
		if (pvti && vclock_was_used(VCLOCK_PVCLOCK)) {
			ret = vm_insert_pfn(
				vma,
				(unsigned long)vmf->virtual_address,
				__pa(pvti) >> PAGE_SHIFT);
		}
	}

	if (ret == 0 || ret == -EBUSY)
		return VM_FAULT_NOPAGE;

	return VM_FAULT_SIGBUS;
}

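/*
 * Map the vvar area and the vdso text as one contiguous reservation:
 * 'addr' is the bottom of the combined region, the vvar pages occupy the
 * first -sym_vvar_start bytes, and the vdso text begins at
 * addr - image->sym_vvar_start.  mm->context.vdso records the text
 * address, which is what the ELF loader reports via AT_SYSINFO_EHDR.
 */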
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr, text_start;
	int ret = 0;
	static const struct vm_special_mapping vvar_mapping = {
		.name = "[vvar]",
		.fault = vvar_fault,
	};

	if (calculate_addr) {
		addr = vdso_addr(current->mm->start_stack,
				 image->size - image->sym_vvar_start);
	} else {
		addr = 0;
	}

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;
	current->mm->context.vdso_image = image;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &text_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
				       VM_PFNMAP,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}

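/*
 * arch_setup_additional_pages() is called from the ELF loader at execve()
 * time; which vdso image (64-bit, x32 or 32-bit) gets mapped depends on
 * the ABI of the new binary.  The resulting mappings show up as [vdso]
 * and [vvar] in /proc/<pid>/maps.
 */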
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
	if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
		return 0;

	return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso64_enabled)
		return 0;

	return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
				       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
	if (test_thread_flag(TIF_X32)) {
		if (!vdso64_enabled)
			return 0;

		return map_vdso(&vdso_image_x32, true);
	}
#endif
#ifdef CONFIG_IA32_EMULATION
	return load_vdso32();
#else
	return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	return load_vdso32();
}
#endif

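/*
 * "vdso=" on the kernel command line sets vdso64_enabled; "vdso=0"
 * disables the 64-bit (and x32) vdso mapping at execve() time.
 */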
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
	vdso64_enabled = simple_strtoul(s, NULL, 0);
	return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
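/*
 * Set up the per-CPU GDT_ENTRY_PER_CPU descriptor (and, if available, the
 * TSC_AUX MSR) so that the vdso's vgetcpu can recover the CPU and node
 * numbers cheaply from user space: roughly cpu = limit & 0xfff and
 * node = limit >> 12 when the limit is read back via LSL.
 */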
static void vgetcpu_cpu_init(void *arg)
{
	int cpu = smp_processor_id();
	struct desc_struct d = { };
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (static_cpu_has(X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store the CPU number in the segment limit so that it can be
	 * loaded quickly in user space by vgetcpu: the low 12 bits hold
	 * the CPU and the next 8 bits hold the node.
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
	d.limit = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}

static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
	long cpu = (long)arg;

	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

	return NOTIFY_DONE;
}

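/*
 * Boot-time setup: patch the 64-bit (and x32) vdso images, initialize the
 * per-CPU vgetcpu state on every online CPU, and register a hotplug
 * notifier so CPUs that come online later get the same treatment.
 */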
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
	init_vdso_image(&vdso_image_x32);
#endif

	cpu_notifier_register_begin();

	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */