linux/arch/arm64/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

struct page_change_data {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

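/*
 * apply_to_page_range() callback: rewrite one PTE in place, first clearing
 * and then setting the requested attribute bits.
 */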
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
        struct page_change_data *cdata = data;
        pte_t pte = READ_ONCE(*ptep);

        pte = clear_pte_bit(pte, cdata->clear_mask);
        pte = set_pte_bit(pte, cdata->set_mask);

        set_pte(ptep, pte);
        return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        struct page_change_data data;
        int ret;

        data.set_mask = set_mask;
        data.clear_mask = clear_mask;

        ret = apply_to_page_range(&init_mm, start, size, change_page_range,
                                        &data);

        flush_tlb_kernel_range(start, start + size);
        return ret;
}

static int change_memory_common(unsigned long addr, int numpages,
                                pgprot_t set_mask, pgprot_t clear_mask)
{
        unsigned long start = addr;
        unsigned long size = PAGE_SIZE * numpages;
        unsigned long end = start + size;
        struct vm_struct *area;
        int i;

        if (!PAGE_ALIGNED(addr)) {
                start &= PAGE_MASK;
                end = start + size;
                WARN_ON_ONCE(1);
        }

        /*
         * Kernel VA mappings are always live, and splitting live section
         * mappings into page mappings may cause TLB conflicts. This means
         * we have to ensure that changing the permission bits of the range
         * we are operating on does not result in such splitting.
         *
         * Let's restrict ourselves to mappings created by vmalloc (or vmap).
         * Those are guaranteed to consist entirely of page mappings, and
         * splitting is never needed.
         *
         * So check whether the [addr, addr + size) interval is entirely
         * covered by precisely one VM area that has the VM_ALLOC flag set.
         */
        area = find_vm_area((void *)addr);
        if (!area ||
            end > (unsigned long)area->addr + area->size ||
            !(area->flags & VM_ALLOC))
                return -EINVAL;

        if (!numpages)
                return 0;

        /*
         * If we are manipulating read-only permissions, apply the same
         * change to the linear mapping of the pages that back this VM area.
         */
        if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
                            pgprot_val(clear_mask) == PTE_RDONLY)) {
                for (i = 0; i < area->nr_pages; i++) {
                        __change_memory_common((u64)page_address(area->pages[i]),
                                               PAGE_SIZE, set_mask, clear_mask);
                }
        }

        /*
         * Get rid of potentially aliasing lazily unmapped vm areas that may
         * have permissions set that deviate from the ones we are setting here.
         */
        vm_unmap_aliases();

        return __change_memory_common(start, size, set_mask, clear_mask);
}
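
/*
 * Note on the rodata_full handling above: a vmalloc'd page has two kernel
 * mappings (the vmalloc alias and its linear map entry), so read-only
 * protection is only effective if both are updated. A minimal sketch of
 * the resulting behaviour (hypothetical caller, error handling omitted):
 *
 *      void *p = vmalloc(PAGE_SIZE);
 *
 *      set_memory_ro((unsigned long)p, 1);
 *      // with rodata_full, writes through both p and
 *      // page_address(vmalloc_to_page(p)) now fault
 */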

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_RDONLY),
                                        __pgprot(PTE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_WRITE),
                                        __pgprot(PTE_RDONLY));
}
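
/*
 * Typical (hypothetical) usage of the pair above: keep a vmalloc'd table
 * read-only and flip it writable only around updates. Sketch only; a real
 * caller must check the return values:
 *
 *      set_memory_rw((unsigned long)table, nr_pages);
 *      update_table(table);            // hypothetical helper
 *      set_memory_ro((unsigned long)table, nr_pages);
 */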

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(PTE_PXN),
                                        __pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_memory_common(addr, numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);
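
/*
 * Example (hypothetical JIT-style caller; insns/len are assumed, error
 * handling omitted): vmalloc memory is mapped with PTE_PXN set, so code
 * copied into it must be made executable explicitly, and the I-cache
 * synchronised before it runs:
 *
 *      void *buf = vmalloc(PAGE_SIZE);
 *
 *      memcpy(buf, insns, len);
 *      set_memory_ro((unsigned long)buf, 1);
 *      set_memory_x((unsigned long)buf, 1);
 *      flush_icache_range((unsigned long)buf, (unsigned long)buf + len);
 */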

int set_memory_valid(unsigned long addr, int numpages, int enable)
{
        if (enable)
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(PTE_VALID),
                                        __pgprot(0));
        else
                return __change_memory_common(addr, PAGE_SIZE * numpages,
                                        __pgprot(0),
                                        __pgprot(PTE_VALID));
}
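
/*
 * Unlike the set_memory_*() helpers above, set_memory_valid() calls
 * __change_memory_common() directly and so skips the VM_ALLOC check in
 * change_memory_common(). The caller must therefore guarantee that the
 * range is mapped at page granularity, e.g. the linear map when
 * debug_pagealloc or rodata_full is in effect. __kernel_map_pages() below
 * uses it as:
 *
 *      set_memory_valid(addr, numpages, 0);    // unmap: clear PTE_VALID
 *      set_memory_valid(addr, numpages, 1);    // remap: set PTE_VALID
 */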

int set_direct_map_invalid_noflush(struct page *page)
{
        struct page_change_data data = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(PTE_VALID),
        };

        if (!rodata_full)
                return 0;

        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
                                   PAGE_SIZE, change_page_range, &data);
}

int set_direct_map_default_noflush(struct page *page)
{
        struct page_change_data data = {
                .set_mask = __pgprot(PTE_VALID | PTE_WRITE),
                .clear_mask = __pgprot(PTE_RDONLY),
        };

        if (!rodata_full)
                return 0;

        return apply_to_page_range(&init_mm,
                                   (unsigned long)page_address(page),
                                   PAGE_SIZE, change_page_range, &data);
}
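
/*
 * The *_noflush variants above update only the linear ("direct") map entry
 * of a single page and, unlike __change_memory_common(), leave TLB
 * invalidation to the caller. They return early when rodata_full is off,
 * since the linear map may then contain block mappings that must not be
 * split. A caller is expected to do something like (sketch):
 *
 *      unsigned long addr = (unsigned long)page_address(page);
 *
 *      set_direct_map_invalid_noflush(page);
 *      flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 */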

void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled() && !rodata_full)
                return;

        set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
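
/*
 * __kernel_map_pages() is the arch hook behind DEBUG_PAGEALLOC: the page
 * allocator invalidates pages as they are freed (enable = 0) and maps them
 * back on allocation (enable = 1), so use-after-free accesses fault instead
 * of silently succeeding. This is safe here because with debug_pagealloc or
 * rodata_full the linear map is known to be page-mapped.
 */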

/*
 * This function is used to determine if a linear map page has been marked as
 * not-valid. Walk the page table and check the PTE_VALID bit. This is based
 * on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
        pgd_t *pgdp;
        pud_t *pudp, pud;
        pmd_t *pmdp, pmd;
        pte_t *ptep;
        unsigned long addr = (unsigned long)page_address(page);

        if (!debug_pagealloc_enabled() && !rodata_full)
                return true;

        pgdp = pgd_offset_k(addr);
        if (pgd_none(READ_ONCE(*pgdp)))
                return false;

        pudp = pud_offset(pgdp, addr);
        pud = READ_ONCE(*pudp);
        if (pud_none(pud))
                return false;
        if (pud_sect(pud))
                return true;

        pmdp = pmd_offset(pudp, addr);
        pmd = READ_ONCE(*pmdp);
        if (pmd_none(pmd))
                return false;
        if (pmd_sect(pmd))
                return true;

        ptep = pte_offset_kernel(pmdp, addr);
        return pte_valid(READ_ONCE(*ptep));
}
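
/*
 * A typical consumer is hibernation's snapshot code, which must not read
 * linear map addresses whose mapping has been invalidated. Sketch
 * (hypothetical caller; dst is assumed):
 *
 *      if (kernel_page_present(page))
 *              memcpy(dst, page_address(page), PAGE_SIZE);
 */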