linux/arch/arm64/mm/pageattr.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

/*
 * The pte bits to set and to clear for each page in the range being changed.
 */
struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = *ptep;

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte(ptep, pte);
        return 0;
}
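
/*
 * Note that change_page_range() clears bits before it sets them, so a bit
 * that appears in both set_mask and clear_mask ends up set in the
 * resulting pte.
 */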

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                        &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}
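
/*
 * For example, the mask pair that makes a range read-only sets PTE_RDONLY
 * while clearing PTE_WRITE in a single pass:
 *
 *      __change_memory_common(start, size,
 *                             __pgprot(PTE_RDONLY), __pgprot(PTE_WRITE));
 */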

static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;

        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        /*
         * Kernel VA mappings are always live, and splitting live section
         * mappings into page mappings may cause TLB conflicts. This means
         * we have to ensure that changing the permission bits of the range
         * we are operating on does not result in such splitting.
         *
         * Let's restrict ourselves to mappings created by vmalloc (or vmap).
         * Those are guaranteed to consist entirely of page mappings, and
         * splitting is never needed.
         *
         * So check whether the [addr, addr + size) interval is entirely
         * covered by precisely one VM area that has the VM_ALLOC flag set.
         */
        area = find_vm_area((void *)addr);
        if (!area ||
            end > (unsigned long)area->addr + area->size ||
            !(area->flags & VM_ALLOC))
                return -EINVAL;

        if (!numpages)
                return 0;

        return __change_memory_common(start, size, set_mask, clear_mask);
}
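
/*
 * Hypothetical sketch (illustration only; change_memory_example() is not
 * part of the original file): the VM_ALLOC check above means only
 * addresses inside a single vmalloc()/vmap() area are accepted, while the
 * linear map alias of the same pages is refused with -EINVAL.
 */
static int __maybe_unused change_memory_example(void)
{
        void *buf = vmalloc(3 * PAGE_SIZE);
        int ret;

        if (!buf)
                return -ENOMEM;

        /* Covered by exactly one VM_ALLOC area, so this is accepted. */
        ret = change_memory_common((unsigned long)buf, 3,
                                   __pgprot(PTE_RDONLY),
                                   __pgprot(PTE_WRITE));

        /* The linear map alias of the same pages is rejected. */
        WARN_ON(change_memory_common(
                        (unsigned long)page_address(vmalloc_to_page(buf)),
                        1, __pgprot(PTE_RDONLY),
                        __pgprot(PTE_WRITE)) != -EINVAL);

        /* Restore write permission before freeing the buffer. */
        change_memory_common((unsigned long)buf, 3,
                             __pgprot(PTE_WRITE), __pgprot(PTE_RDONLY));
        vfree(buf);
        return ret;
}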

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_RDONLY),
                                        __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_WRITE),
                                        __pgprot(PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_PXN),
                                        __pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
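
/*
 * Hypothetical sketch (illustration only; make_buffer_executable() is not
 * part of the original file): the intended pairing of these helpers is
 * W^X management of vmalloc'ed code buffers, e.g. a JIT that makes a
 * buffer executable only once it can no longer be written.
 */
static int __maybe_unused make_buffer_executable(void *code, int pages)
{
        int ret;

        /* Drop write permission first so the mapping is never W+X. */
        ret = set_memory_ro((unsigned long)code, pages);
        if (ret)
                return ret;

        /* Clearing PTE_PXN lets the kernel execute from the buffer. */
        return set_memory_x((unsigned long)code, pages);
}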

#ifdef CONFIG_DEBUG_PAGEALLOC
/*
 * Toggle PTE_VALID on a range of linear map pages: mapped in when enable
 * is set, unmapped (so any access faults) when it is clear.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (enable)
                __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(PTE_VALID),
                                        __pgprot(0));
        else
                __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_VALID));
}
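
/*
 * (Illustrative note: the page allocator reaches this hook through the
 * kernel_map_pages() wrapper, so that freed pages vanish from the linear
 * map and a use-after-free access faults instead of silently succeeding.)
 */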
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long addr = (unsigned long)page_address(page);

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return false;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return false;
        if (pud_sect(*pud))
                return true;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;
        if (pmd_sect(*pmd))
                return true;

        pte = pte_offset_kernel(pmd, addr);
        return pte_valid(*pte);
}
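
/*
 * (Illustrative note: the hibernation core uses this, e.g. in
 * safe_copy_page() in kernel/power/snapshot.c, to decide whether a page
 * must be temporarily mapped back before it can be copied into the
 * snapshot image.)
 */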
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */