linux/arch/x86/entry/vdso/vma.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <linux/ptrace.h>
#include <linux/time_namespace.h>

#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>
#include <clocksource/hyperv_timer.h>

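/*
 * Re-include asm/vvar.h with EMIT_VVAR() redefined so that every
 * EMIT_VVAR(name, offset) in the header now expands to a constant
 * "const size_t name##_offset = offset;" - e.g. _vdso_data_offset,
 * used by arch_get_vdso_data() below.
 */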
#undef _ASM_X86_VVAR_H
#define EMIT_VVAR(name, offset) \
        const size_t name ## _offset = offset;
#include <asm/vvar.h>

struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
        return (struct vdso_data *)(vvar_page + _vdso_data_offset);
}
#undef EMIT_VVAR

unsigned int vclocks_used __read_mostly;

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif

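/*
 * Runs once at boot for each vDSO image: sanity-check that the image
 * size is page-aligned and apply alternative-instruction patching to
 * the image text.
 */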
void __init init_vdso_image(const struct vdso_image *image)
{
        BUG_ON(image->size % PAGE_SIZE != 0);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

static const struct vm_special_mapping vvar_mapping;
struct linux_binprm;

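/*
 * Fault handler for the [vdso] mapping: back each faulting page with
 * the corresponding page of the in-kernel vDSO image blob.
 */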
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;

        if (!image || (vmf->pgoff << PAGE_SHIFT) >= image->size)
                return VM_FAULT_SIGBUS;

        vmf->page = virt_to_page(image->data + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}

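/*
 * If a compat task is currently stopped on the vDSO's int80 landing
 * pad while the mapping is being moved, rewrite its saved IP so it
 * resumes at the pad's new address.
 */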
static void vdso_fix_landing(const struct vdso_image *image,
                struct vm_area_struct *new_vma)
{
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        if (in_ia32_syscall() && image == &vdso_image_32) {
                struct pt_regs *regs = current_pt_regs();
                unsigned long vdso_land = image->sym_int80_landing_pad;
                unsigned long old_land_addr = vdso_land +
                        (unsigned long)current->mm->context.vdso;

                /* Fix up the userspace landing address; see do_fast_syscall_32() */
                if (regs->ip == old_land_addr)
                        regs->ip = new_vma->vm_start + vdso_land;
        }
#endif
}

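/*
 * mremap() handler for the [vdso] mapping: forbid resizing and keep
 * mm->context.vdso pointing at the mapping's new location.
 */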
static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        const struct vdso_image *image = current->mm->context.vdso_image;

        if (image->size != new_size)
                return -EINVAL;

        vdso_fix_landing(image, new_vma);
        current->mm->context.vdso = (void __user *)new_vma->vm_start;

        return 0;
}

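/*
 * mremap() handler for the [vvar] mapping: moving it is fine, but its
 * size must stay equal to the original vvar area (-sym_vvar_start).
 */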
static int vvar_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        const struct vdso_image *image = new_vma->vm_mm->context.vdso_image;
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

        if (new_size != -image->sym_vvar_start)
                return -EINVAL;

        return 0;
}

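/*
 * Return the time-namespace vvar page for the faulting task, or NULL.
 * Remote accesses (through another task's mm) are not supported, since
 * the current task's namespace would be the wrong one to consult.
 */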
#ifdef CONFIG_TIME_NS
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        if (likely(vma->vm_mm == current->mm))
                return current->nsproxy->time_ns->vvar_page;

        /*
         * VM_PFNMAP | VM_IO protect .fault() handler from being called
         * through interfaces like /proc/$pid/mem or
         * process_vm_{readv,writev}() as long as there's no .access()
         * in special_mapping_vmops().
         * For more details see check_vma_flags() and __access_remote_vm().
         */

        WARN(1, "vvar_page accessed remotely");

        return NULL;
}

/*
 * The vvar page layout depends on whether a task belongs to the root or
 * a non-root time namespace. Whenever a task changes its namespace, the
 * VVAR page tables are cleared and then re-faulted with the
 * corresponding layout.
 * See also the comment near timens_setup_vdso_data() for details.
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
        struct mm_struct *mm = task->mm;
        struct vm_area_struct *vma;

        mmap_read_lock(mm);

        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                unsigned long size = vma->vm_end - vma->vm_start;

                if (vma_is_special_mapping(vma, &vvar_mapping))
                        zap_page_range(vma, vma->vm_start, size);
        }

        mmap_read_unlock(mm);
        return 0;
}
#else
static inline struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
        return NULL;
}
#endif

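/*
 * Fault handler for the [vvar] mapping. The faulting offset selects
 * which backing page to install: the vvar page itself, the pvclock or
 * hvclock pages (when those clocksources are in use), or the
 * time-namespace page.
 */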
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
                      struct vm_area_struct *vma, struct vm_fault *vmf)
{
        const struct vdso_image *image = vma->vm_mm->context.vdso_image;
        unsigned long pfn;
        long sym_offset;

        if (!image)
                return VM_FAULT_SIGBUS;

        sym_offset = (long)(vmf->pgoff << PAGE_SHIFT) +
                image->sym_vvar_start;

        /*
         * Sanity check: a symbol offset of zero means that the page
         * does not exist for this vdso image, not that the page is at
         * offset zero relative to the text mapping.  This should be
         * impossible here, because sym_offset should only be zero for
         * the page past the end of the vvar mapping.
         */
        if (sym_offset == 0)
                return VM_FAULT_SIGBUS;

        if (sym_offset == image->sym_vvar_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;

                /*
                 * If a task belongs to a time namespace then a namespace
                 * specific VVAR is mapped with the sym_vvar_page offset and
                 * the real VVAR page is mapped with the sym_timens_page
                 * offset.
                 * See also the comment near timens_setup_vdso_data().
                 */
                if (timens_page) {
                        unsigned long addr;
                        vm_fault_t err;

                        /*
                         * Optimization: inside a time namespace, pre-fault
                         * the real VVAR page as well. The timens page holds
                         * only clock offsets relative to the VVAR page, so
                         * vDSO code would fault the VVAR page in shortly
                         * anyway.
                         */
                        addr = vmf->address + (image->sym_timens_page - sym_offset);
                        err = vmf_insert_pfn(vma, addr, pfn);
                        if (unlikely(err & VM_FAULT_ERROR))
                                return err;

                        pfn = page_to_pfn(timens_page);
                }

                return vmf_insert_pfn(vma, vmf->address, pfn);
        } else if (sym_offset == image->sym_pvclock_page) {
                struct pvclock_vsyscall_time_info *pvti =
                        pvclock_get_pvti_cpu0_va();
                if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
                        return vmf_insert_pfn_prot(vma, vmf->address,
                                        __pa(pvti) >> PAGE_SHIFT,
                                        pgprot_decrypted(vma->vm_page_prot));
                }
        } else if (sym_offset == image->sym_hvclock_page) {
                struct ms_hyperv_tsc_page *tsc_pg = hv_get_tsc_page();

                if (tsc_pg && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
                        return vmf_insert_pfn(vma, vmf->address,
                                        virt_to_phys(tsc_pg) >> PAGE_SHIFT);
        } else if (sym_offset == image->sym_timens_page) {
                struct page *timens_page = find_timens_vvar_page(vma);

                if (!timens_page)
                        return VM_FAULT_SIGBUS;

                pfn = __pa_symbol(&__vvar_page) >> PAGE_SHIFT;
                return vmf_insert_pfn(vma, vmf->address, pfn);
        }

        return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping vdso_mapping = {
        .name = "[vdso]",
        .fault = vdso_fault,
        .mremap = vdso_mremap,
};
static const struct vm_special_mapping vvar_mapping = {
        .name = "[vvar]",
        .fault = vvar_fault,
        .mremap = vvar_mremap,
};

/*
 * Add vdso and vvar mappings to the current process.
 * @image          - blob to map
 * @addr           - request a specific address (zero to let the kernel choose)
 */
static int map_vdso(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long text_start;
        int ret = 0;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &vdso_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
                                       VM_PFNMAP,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                do_munmap(mm, text_start, image->size, NULL);
        } else {
                current->mm->context.vdso = (void __user *)text_start;
                current->mm->context.vdso_image = image;
        }

up_fail:
        mmap_write_unlock(mm);
        return ret;
}

#ifdef CONFIG_X86_64
/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address.  It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
}

static int map_vdso_randomized(const struct vdso_image *image)
{
        unsigned long addr = vdso_addr(current->mm->start_stack,
                                       image->size - image->sym_vvar_start);

        return map_vdso(image, addr);
}
#endif

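/*
 * Map a vDSO image at most once per mm, e.g. from the
 * arch_prctl(ARCH_MAP_VDSO_*) path, so userspace cannot install a
 * second vdso/vvar pair.
 */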
int map_vdso_once(const struct vdso_image *image, unsigned long addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        mmap_write_lock(mm);
        /*
         * Fail if the vdso blob is already mapped, to keep userspace from
         * abusing install_special_mapping(), which may not get accounting
         * and rlimits right.
         * We could search for a vma near context.vdso, but this is a slow
         * path anyway, so explicitly check all VMAs to be completely sure.
         */
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (vma_is_special_mapping(vma, &vdso_mapping) ||
                                vma_is_special_mapping(vma, &vvar_mapping)) {
                        mmap_write_unlock(mm);
                        return -EEXIST;
                }
        }
        mmap_write_unlock(mm);

        return map_vdso(image, addr);
}

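/* Map the 32-bit vDSO image unless it has been disabled. */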
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, 0);
}
#endif

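/*
 * Called by the ELF loader at exec time to map the vDSO into the new
 * process image.
 */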
#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso_randomized(&vdso_image_64);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;
                return map_vdso_randomized(&vdso_image_x32);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif

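/* Parse the "vdso=" boot parameter; "vdso=0" disables the 64-bit vDSO. */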
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);

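/*
 * Boot-time initialization: apply alternatives to the 64-bit (and,
 * when configured, x32) vDSO images.
 */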
static int __init init_vdso(void)
{
        BUILD_BUG_ON(VDSO_CLOCKMODE_MAX >= 32);

        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */