linux/arch/arm/mm/pageattr.c
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/mm.h>
#include <linux/module.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>

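/*
 * Bits to set and bits to clear in each PTE, handed to the
 * apply_to_page_range() callback below.
 */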
struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

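/* Called for every PTE in the range: clear then set the requested bits. */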
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
                        void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = *ptep;

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte_ext(ptep, pte, 0);
        return 0;
}

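/*
 * Apply set_mask/clear_mask to every PTE covering numpages pages starting
 * at addr, then flush the TLB for the affected range.
 */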
static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE*numpages;
        unsigned long end = start + size;
        int ret;
        struct page_change_data data;

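        /* Tolerate a misaligned start: round down to the page boundary, but warn. */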
        if (!IS_ALIGNED(addr, PAGE_SIZE)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        if (!numpages)
                return 0;

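        /* Only kernel mappings in the module area may be changed. */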
        if (start < MODULES_VADDR || start >= MODULES_END)
                return -EINVAL;

        if (end < MODULES_VADDR || end >= MODULES_END)
                return -EINVAL;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                        &data);

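        /* Ensure the old permissions are no longer cached in the TLB. */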
        flush_tlb_kernel_range(start, end);
        return ret;
}

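/*
 * Set or clear L_PTE_RDONLY / L_PTE_XN on module-area mappings, e.g. to
 * make module text read-only or module data non-executable.
 */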
int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(L_PTE_RDONLY),
                                        __pgprot(0));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(L_PTE_RDONLY));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(L_PTE_XN),
                                        __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(L_PTE_XN));
}