linux/arch/riscv/mm/pageattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 SiFive
 */

#include <linux/pagewalk.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/bitops.h>
#include <asm/set_memory.h>

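/*
 * Protection bits to OR into and to clear from every page table entry
 * visited by the page walk below.
 */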
struct pageattr_masks {
        pgprot_t set_mask;
        pgprot_t clear_mask;
};

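/* Apply the current walk's set/clear masks to a raw page table entry value. */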
static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
{
        struct pageattr_masks *masks = walk->private;
        unsigned long new_val = val;

        new_val &= ~(pgprot_val(masks->clear_mask));
        new_val |= (pgprot_val(masks->set_mask));

        return new_val;
}

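/*
 * Per-level walk callbacks: a leaf entry at any level maps the range
 * directly, so it is updated in place; non-leaf entries are left alone and
 * the walk descends to the next level. PTEs are always leaves and are
 * updated unconditionally.
 */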
static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pgd_t val = READ_ONCE(*pgd);

        if (pgd_leaf(val)) {
                val = __pgd(set_pageattr_masks(pgd_val(val), walk));
                set_pgd(pgd, val);
        }

        return 0;
}

static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        p4d_t val = READ_ONCE(*p4d);

        if (p4d_leaf(val)) {
                val = __p4d(set_pageattr_masks(p4d_val(val), walk));
                set_p4d(p4d, val);
        }

        return 0;
}

static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pud_t val = READ_ONCE(*pud);

        if (pud_leaf(val)) {
                val = __pud(set_pageattr_masks(pud_val(val), walk));
                set_pud(pud, val);
        }

        return 0;
}

static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pmd_t val = READ_ONCE(*pmd);

        if (pmd_leaf(val)) {
                val = __pmd(set_pageattr_masks(pmd_val(val), walk));
                set_pmd(pmd, val);
        }

        return 0;
}

static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        pte_t val = READ_ONCE(*pte);

        val = __pte(set_pageattr_masks(pte_val(val), walk));
        set_pte(pte, val);

        return 0;
}

static int pageattr_pte_hole(unsigned long addr, unsigned long next,
                             int depth, struct mm_walk *walk)
{
        /* Nothing to do here */
        return 0;
}

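/* Walk callbacks shared by every page attribute update in this file. */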
static const struct mm_walk_ops pageattr_ops = {
        .pgd_entry = pageattr_pgd_entry,
        .p4d_entry = pageattr_p4d_entry,
        .pud_entry = pageattr_pud_entry,
        .pmd_entry = pageattr_pmd_entry,
        .pte_entry = pageattr_pte_entry,
        .pte_hole = pageattr_pte_hole,
};

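/*
 * Walk the kernel page tables over [addr, addr + numpages * PAGE_SIZE) and
 * apply the given set/clear masks to every mapping found, then flush the
 * TLB for the range. Kernel mappings have no VMAs, hence the "novma" walker.
 */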
static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
                        pgprot_t clear_mask)
{
        int ret;
        unsigned long start = addr;
        unsigned long end = start + PAGE_SIZE * numpages;
        struct pageattr_masks masks = {
                .set_mask = set_mask,
                .clear_mask = clear_mask
        };

        if (!numpages)
                return 0;

        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        flush_tlb_kernel_range(start, end);

        return ret;
}

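/*
 * set_memory_*(): adjust the R/W/X permission bits on a range of kernel
 * pages. Each wrapper only picks the appropriate set/clear masks.
 */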
int set_memory_rw_nx(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
                            __pgprot(_PAGE_EXEC));
}

int set_memory_ro(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
                            __pgprot(_PAGE_WRITE));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
                            __pgprot(0));
}

int set_memory_x(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
}

int set_memory_nx(unsigned long addr, int numpages)
{
        return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
}

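/*
 * Remove a page from the kernel linear mapping by clearing its PRESENT bit.
 * As the _noflush suffix implies, TLB invalidation is left to the caller.
 */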
int set_direct_map_invalid_noflush(struct page *page)
{
        int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
                .set_mask = __pgprot(0),
                .clear_mask = __pgprot(_PAGE_PRESENT)
        };

        /* The linear mapping has no VMAs, so the novma walker must be used. */
        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        return ret;
}

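/*
 * Restore a page in the kernel linear mapping to the default kernel
 * protections. TLB invalidation is again left to the caller.
 */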
int set_direct_map_default_noflush(struct page *page)
{
        int ret;
        unsigned long start = (unsigned long)page_address(page);
        unsigned long end = start + PAGE_SIZE;
        struct pageattr_masks masks = {
                .set_mask = PAGE_KERNEL,
                .clear_mask = __pgprot(0)
        };

        /* The linear mapping has no VMAs, so the novma walker must be used. */
        mmap_read_lock(&init_mm);
        ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
                                    &masks);
        mmap_read_unlock(&init_mm);

        return ret;
}

#ifdef CONFIG_DEBUG_PAGEALLOC
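/*
 * For CONFIG_DEBUG_PAGEALLOC: map or unmap pages in the linear mapping so
 * that stray accesses to freed pages fault immediately.
 */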
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (!debug_pagealloc_enabled())
                return;

        if (enable)
                __set_memory((unsigned long)page_address(page), numpages,
                             __pgprot(_PAGE_PRESENT), __pgprot(0));
        else
                __set_memory((unsigned long)page_address(page), numpages,
                             __pgprot(0), __pgprot(_PAGE_PRESENT));
}
#endif

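/*
 * Report whether @page is currently mapped in the kernel page tables by
 * walking them by hand. A present leaf at any level counts as mapped.
 */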
bool kernel_page_present(struct page *page)
{
        unsigned long addr = (unsigned long)page_address(page);
        pgd_t *pgd;
        pud_t *pud;
        p4d_t *p4d;
        pmd_t *pmd;
        pte_t *pte;

        pgd = pgd_offset_k(addr);
        if (!pgd_present(*pgd))
                return false;
        if (pgd_leaf(*pgd))
                return true;

        p4d = p4d_offset(pgd, addr);
        if (!p4d_present(*p4d))
                return false;
        if (p4d_leaf(*p4d))
                return true;

        pud = pud_offset(p4d, addr);
        if (!pud_present(*pud))
                return false;
        if (pud_leaf(*pud))
                return true;

        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
                return false;
        if (pmd_leaf(*pmd))
                return true;

        pte = pte_offset_kernel(pmd, addr);
        return pte_present(*pte);
}