/* linux/arch/riscv/kernel/vdso.c */
/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 * Copyright (C) 2012 ARM Limited
 * Copyright (C) 2015 Regents of the University of California
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  19
  20#include <linux/mm.h>
  21#include <linux/slab.h>
  22#include <linux/binfmts.h>
  23#include <linux/err.h>
  24
  25#include <asm/vdso.h>
  26
/* Start/end of the vDSO image, provided by the linker script. */
extern char vdso_start[], vdso_end[];

/* Number of pages occupied by the vDSO code image (data page excluded). */
static unsigned int vdso_pages;
/* Pages to map: vdso_pages code pages followed by one data page. */
static struct page **vdso_pagelist;
  31
/*
 * The vDSO data page, shared with userspace.  The union pads
 * struct vdso_data out to exactly one page so the page can be
 * mapped on its own behind the vDSO code pages.
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
  40
  41static int __init vdso_init(void)
  42{
  43        unsigned int i;
  44
  45        vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
  46        vdso_pagelist =
  47                kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
  48        if (unlikely(vdso_pagelist == NULL)) {
  49                pr_err("vdso: pagelist allocation failed\n");
  50                return -ENOMEM;
  51        }
  52
  53        for (i = 0; i < vdso_pages; i++) {
  54                struct page *pg;
  55
  56                pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
  57                ClearPageReserved(pg);
  58                vdso_pagelist[i] = pg;
  59        }
  60        vdso_pagelist[i] = virt_to_page(vdso_data);
  61
  62        return 0;
  63}
  64arch_initcall(vdso_init);
  65
  66int arch_setup_additional_pages(struct linux_binprm *bprm,
  67        int uses_interp)
  68{
  69        struct mm_struct *mm = current->mm;
  70        unsigned long vdso_base, vdso_len;
  71        int ret;
  72
  73        vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
  74
  75        down_write(&mm->mmap_sem);
  76        vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
  77        if (IS_ERR_VALUE(vdso_base)) {
  78                ret = vdso_base;
  79                goto end;
  80        }
  81
  82        /*
  83         * Put vDSO base into mm struct. We need to do this before calling
  84         * install_special_mapping or the perf counter mmap tracking code
  85         * will fail to recognise it as a vDSO (since arch_vma_name fails).
  86         */
  87        mm->context.vdso = (void *)vdso_base;
  88
  89        ret = install_special_mapping(mm, vdso_base, vdso_len,
  90                (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
  91                vdso_pagelist);
  92
  93        if (unlikely(ret))
  94                mm->context.vdso = NULL;
  95
  96end:
  97        up_write(&mm->mmap_sem);
  98        return ret;
  99}
 100
 101const char *arch_vma_name(struct vm_area_struct *vma)
 102{
 103        if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
 104                return "[vdso]";
 105        return NULL;
 106}
 107
/*
 * Function stubs to prevent linker errors when AT_SYSINFO_EHDR is defined
 */

/* No gate area on RISC-V: no address is ever inside one. */
int in_gate_area_no_mm(unsigned long addr)
{
	return 0;
}
 116
/* No gate area on RISC-V: always reports "not in gate area". */
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}
 121
/* No gate VMA exists on RISC-V; always NULL. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return NULL;
}
 126