linux/arch/s390/kernel/vdso.c
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>

#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

#ifdef CONFIG_64BIT
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
#endif /* CONFIG_64BIT */
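/*
 * vdso32_start/vdso32_end and vdso64_start/vdso64_end bound the vDSO
 * images built into the kernel image (presumably provided by the
 * vdso32/vdso64 wrapper objects).  vdso_init() below turns them into
 * the vdso32_pagelist/vdso64_pagelist page arrays that get mapped
 * into user processes.
 */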

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
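/*
 * vdso_setup() below parses the "vdso=" kernel parameter: "vdso=on"
 * and "vdso=off" are accepted literally, anything else is fed to
 * strict_strtoul() and treated as a boolean, e.g. "vdso=0" disables
 * the mapping.
 */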

static int __init vdso_setup(char *s)
{
        unsigned long val;
        int rc;

        rc = 0;
        if (strncmp(s, "on", 3) == 0)
                vdso_enabled = 1;
        else if (strncmp(s, "off", 4) == 0)
                vdso_enabled = 0;
        else {
                rc = strict_strtoul(s, 0, &val);
                vdso_enabled = rc ? 0 : !!val;
        }
        return !rc;
}
__setup("vdso=", vdso_setup);

/*
 * The vdso data page
 */
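/*
 * The union pads struct vdso_data out to exactly one page so that it
 * can be mapped into every process (without VM_WRITE) as the last
 * page of the vDSO; the kernel updates it through the vdso_data
 * pointer.
 */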
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/*
 * Setup vdso data page.
 */
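/*
 * stfl() returns the low-order facility bits; the bit tested here
 * corresponds to the ECTG (extract CPU time) facility.  ectg_available
 * is only set when the kernel runs with a switched address mode,
 * presumably because the per-cpu vDSO data set up below depends on it.
 */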
static void vdso_init_data(struct vdso_data *vd)
{
        unsigned int facility_list;

        facility_list = stfl();
        vd->ectg_available = switch_amode && (facility_list & 1);
}

#ifdef CONFIG_64BIT
/*
 * Setup per cpu vdso data page.
 */
static void vdso_init_per_cpu_data(int cpu, struct vdso_per_cpu_data *vpcd)
{
}

/*
 * Allocate/free per cpu vdso data.
 */
#ifdef CONFIG_64BIT
#define SEGMENT_ORDER   2
#else
#define SEGMENT_ORDER   1
#endif
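/*
 * vdso_alloc_per_cpu() builds a tiny private address space for each
 * CPU: a segment table (four pages on 64 bit, hence SEGMENT_ORDER 2),
 * one page table and one data page, wired together so that the data
 * page holds this CPU's struct vdso_per_cpu_data.  The vDSO reaches
 * it through the PASTE entry in the lowcore once control register 5
 * has been set up by vdso_init_cr5().
 */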

int vdso_alloc_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;
        int i;

        lowcore->vdso_per_cpu_data = __LC_PASTE;

        if (!switch_amode || !vdso_enabled)
                return 0;

        segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
        page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
        page_frame = get_zeroed_page(GFP_KERNEL);
        if (!segment_table || !page_table || !page_frame)
                goto out;

        clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
                    PAGE_SIZE << SEGMENT_ORDER);
        clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
                    256*sizeof(unsigned long));

        *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
        *(unsigned long *) page_table = _PAGE_RO + page_frame;
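        /*
         * What follows wires the page into the lowcore PASTE mechanism:
         * psal and aste live in the otherwise unused second half of the
         * page table page, lowcore->paste[4] points at psal, psal points
         * at aste, and aste points back at the segment table with the
         * usual ASCE bits.  The magic constants mirror the architected
         * ASTE/access-list entry layout (sketched from the code; see the
         * z/Architecture Principles of Operation for the exact fields).
         */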

        psal = (u32 *) (page_table + 256*sizeof(unsigned long));
        aste = psal + 32;

        for (i = 4; i < 32; i += 4)
                psal[i] = 0x80000000;

        lowcore->paste[4] = (u32)(addr_t) psal;
        psal[0] = 0x20000000;
        psal[2] = (u32)(addr_t) aste;
        *(unsigned long *) (aste + 2) = segment_table +
                _ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
        aste[4] = (u32)(addr_t) psal;
        lowcore->vdso_per_cpu_data = page_frame;

        vdso_init_per_cpu_data(cpu, (struct vdso_per_cpu_data *) page_frame);
        return 0;

out:
        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
        return -ENOMEM;
}
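/*
 * vdso_free_per_cpu() recovers the three allocations by walking back
 * from lowcore->paste[4] through the ASTE, segment table and page
 * table entries that vdso_alloc_per_cpu() installed.
 */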

void vdso_free_per_cpu(int cpu, struct _lowcore *lowcore)
{
        unsigned long segment_table, page_table, page_frame;
        u32 *psal, *aste;

        if (!switch_amode || !vdso_enabled)
                return;

        psal = (u32 *)(addr_t) lowcore->paste[4];
        aste = (u32 *)(addr_t) psal[2];
        segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
        page_table = *(unsigned long *) segment_table;
        page_frame = *(unsigned long *) page_table;

        free_page(page_frame);
        free_page(page_table);
        free_pages(segment_table, SEGMENT_ORDER);
}
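/*
 * Control register 5 is pointed at the paste array in the lowcore so
 * that the PASTE-based translation set up above becomes reachable;
 * the plain offset works as an address because low real addresses are
 * prefixed to each CPU's own lowcore.  This has to run on every CPU,
 * hence on_each_cpu() in vdso_init_cr5().
 */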

static void __vdso_init_cr5(void *dummy)
{
        unsigned long cr5;

        cr5 = offsetof(struct _lowcore, paste);
        __ctl_load(cr5, 5, 5);
}

static void vdso_init_cr5(void)
{
        if (switch_amode && vdso_enabled)
                on_each_cpu(__vdso_init_cr5, NULL, 1);
}
#endif /* CONFIG_64BIT */

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_enabled)
                return 0;
        /*
         * Only map the vdso for dynamically linked elf binaries.
         */
        if (!uses_interp)
                return 0;

        vdso_base = mm->mmap_base;
#ifdef CONFIG_64BIT
        vdso_pagelist = vdso64_pagelist;
        vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
        }
#endif
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
#endif
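        /*
         * A 64 bit kernel maps its 64 bit vDSO image by default; compat
         * (31 bit) tasks get the 32 bit image instead.  A 31 bit kernel
         * only has the 32 bit image.
         */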

        /*
         * If the vDSO had a problem and was disabled, just don't "enable"
         * it for this process.
         */
        if (vdso_pages == 0)
                return 0;

        current->mm->context.vdso_base = 0;

        /*
         * Pick a base address for the vDSO in process space. We try to put
         * it at vdso_base, which is the "natural" base for it, but we might
         * fail and end up putting it elsewhere.
         */
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, vdso_base,
                                      vdso_pages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto out_up;
        }

        /*
         * Put vDSO base into mm struct. We need to do this before calling
         * install_special_mapping or the perf counter mmap tracking code
         * will fail to recognise it as a vDSO (since arch_vma_name fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our vma flags don't have VM_WRITE, so by default the process
         * isn't allowed to write to these pages.
         * gdb can get around that through the ptrace interface and thus
         * trigger COW on these pages, but it is then your responsibility
         * never to do so on the "data" page of the vDSO, or you'll stop
         * getting kernel updates and your nice userland gettimeofday will
         * be totally dead. It's fine to use ptrace to set breakpoints in
         * the vDSO code pages, though.
         *
         * Make sure the vDSO gets into every core dump.
         * Dumping its contents makes post-mortem analysis fully
         * interpretable later without matching up the same kernel and
         * hardware config to see what PC values meant.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC|
                                     VM_ALWAYSDUMP,
                                     vdso_pagelist);
        if (rc)
                current->mm->context.vdso_base = 0;
out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
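/*
 * arch_vma_name() is what makes the mapping show up as "[vdso]" in
 * /proc/<pid>/maps; it keys off the vdso_base recorded in the mm
 * context above.
 */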

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}

static int __init vdso_init(void)
{
        int i;

        if (!vdso_enabled)
                return 0;
        vdso_init_data(vdso_data);
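        /*
         * Each pagelist below holds the image pages followed by the shared
         * vdso_data page and a terminating NULL; the "+ 1" in the page
         * count accounts for the data page.
         */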
#if defined(CONFIG_32BIT) || defined(CONFIG_COMPAT)
        /* Calculate the size of the 32 bit vDSO */
        vdso32_pages = ((&vdso32_end - &vdso32_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
        vdso32_pagelist[vdso32_pages] = NULL;
#endif

#ifdef CONFIG_64BIT
        /* Calculate the size of the 64 bit vDSO */
        vdso64_pages = ((&vdso64_end - &vdso64_start
                         + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

        /* Make sure pages are in the correct state */
        vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages - 1; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                ClearPageReserved(pg);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
        vdso64_pagelist[vdso64_pages] = NULL;
#ifndef CONFIG_SMP
        if (vdso_alloc_per_cpu(0, &S390_lowcore))
                BUG();
#endif
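        /*
         * On SMP kernels the per-cpu vDSO data for each CPU is presumably
         * set up when that CPU's lowcore is allocated; the call above only
         * covers the single CPU of a non-SMP build.
         */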
        vdso_init_cr5();
#endif /* CONFIG_64BIT */

        get_page(virt_to_page(vdso_data));
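        /*
         * The barrier below presumably publishes the fully initialized
         * pagelists and data page before any other code can look at them.
         */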

        smp_wmb();

        return 0;
}
arch_initcall(vdso_init);
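/*
 * s390 has no gate area or vsyscall page; the generic code still calls
 * these hooks, so provide empty stubs.
 */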

int in_gate_area_no_task(unsigned long addr)
{
        return 0;
}

int in_gate_area(struct task_struct *task, unsigned long addr)
{
        return 0;
}

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
        return NULL;
}