/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;

static int __init vdso_setup(char *s)
{
        unsigned long val;
        int rc;

        rc = 0;
        if (strncmp(s, "on", 3) == 0)
                vdso_enabled = 1;
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
                rc = kstrtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
}
__setup("vdso=", vdso_setup);
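
/*
 * Illustrative kernel command-line values (a sketch, not an
 * exhaustive list):
 *
 *      vdso=on         enable the vDSO (the default)
 *      vdso=off        disable the vDSO
 *      vdso=1          numeric form; any non-zero value enables it
 *
 * An unparsable value disables the vDSO, and the handler then
 * returns 0 so the parameter is treated as unhandled.
 */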

/*
 * The vdso data page
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

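/*
 * The data page is mapped read-only into every process as the last
 * page of the vDSO. The kernel side updates it from the timekeeping
 * code, which is what lets userspace gettimeofday()/clock_gettime()
 * run without entering the kernel.
 */
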
/*
 * Set up the vdso data page: record whether the extract-CPU-time
 * facility (facility bit 31, the ECTG instruction) is installed.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
        vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER   2

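/*
 * A sketch of the per-cpu structure built below (my reading of the
 * code; sizes follow the s390 architecture):
 *
 *      segment table   4 pages (order SEGMENT_ORDER), 2048 entries,
 *                      each entry mapping a 1 MB segment
 *      page table      256 entries in the first half of one page; the
 *                      first entry points at the per-cpu data page
 *                      frame, mapped read-only
 *      psal/aste       access-list and ASTE words kept in the unused
 *                      second half of the page-table page
 */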
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        struct vdso_per_cpu_data *vd;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (!vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        /* Initialize per-cpu vdso data page */
        vd = (struct vdso_per_cpu_data *) page_frame;
        vd->cpu_nr = lowcore->cpu_nr;
        vd->node_id = cpu_to_node(vd->cpu_nr);

        /* Set up access register mode page table */
        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_INVALID,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x02000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;

        return 0;

out:
        /* free_page()/free_pages() treat a zero address as a no-op */
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}

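/*
 * Illustrative lookup chain for the structure set up above:
 *
 *      CR5 -> lowcore paste array -> psal -> aste -> segment table
 *          -> page table -> per-cpu vdso data page
 *
 * vdso_free_per_cpu() below walks the same chain backwards to
 * recover the three allocations.
 */
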
void vdso_free_per_cpu(struct lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (!vdso_enabled)
                return;

        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}

/*
 * Load control register 5 with the lowcore offset of the paste array;
 * the lowcore sits at absolute address zero via prefixing, so the
 * offset doubles as the address the hardware dereferences for
 * access-register-mode translation.
 */
static void vdso_init_cr5(void)
{
        unsigned long cr5;

        if (!vdso_enabled)
                return;
        cr5 = offsetof(struct lowcore, paste);
        __ctl_load(cr5, 5, 5);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
        /*
         * The vDSO had a problem and was disabled; just don't "enable"
         * it for this process.
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * Pick a base address for the vDSO in process space; the hint
         * passed to get_unmapped_area() is 0, so the vDSO may end up
         * anywhere there is room.
         */
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Put the vDSO base into the mm struct. We need to do this
         * before calling install_special_mapping, or the perf counter
         * mmap tracking code will fail to recognise it as a vDSO
         * (since arch_vma_name fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to those pages.
         * gdb can break that with the ptrace interface and thus trigger
         * COW on those pages, but it is then your responsibility never
         * to do that on the "data" page of the vDSO, or you'll stop
         * getting kernel updates and your nice userland gettimeofday
         * will be totally dead. It is fine to use that for setting
         * breakpoints in the vDSO code pages though.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                     vdso_pagelist);
        if (rc)
                current->mm->context.vdso_base = 0;
out_up:
        up_write(&mm->mmap_sem);
        return rc;
}

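/*
 * Once mapped, the vDSO base address reaches userspace through the
 * AT_SYSINFO_EHDR auxiliary vector entry (see ARCH_DLINFO), which is
 * how glibc locates the vDSO after exec().
 */
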
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

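/*
 * With the mapping installed, the vma shows up tagged by
 * arch_vma_name() in /proc/<pid>/maps, along the lines of
 * (illustrative addresses only):
 *
 *      3fffd4be000-3fffd4bf000 r-xp 00000000 00:00 0   [vdso]
 */
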
static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
        /* Calculate the size of the 32 bit vDSO (code pages + data page) */
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif
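
        /*
         * Each pagelist built here holds the vDSO code pages, then the
         * shared data page (the trailing "+ 1" in the page count), then
         * a NULL terminator, as install_special_mapping() expects.
         */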

        /* Calculate the size of the 64 bit vDSO (code pages + data page) */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
        if (vdso_alloc_per_cpu(&S390_lowcore))
                BUG();
        vdso_init_cr5();

        get_page(virt_to_page(vdso_data));

        return 0;
}
early_initcall(vdso_init);