linux/arch/powerpc/mm/pgtable_32.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

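/*
 * Allocate early page tables from memblock, using the requested size
 * as the alignment so each table is naturally aligned; memblock_alloc()
 * returns zeroed memory, so the new table needs no explicit clearing.
 */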
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}
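
/*
 * Usage sketch (illustrative, not part of the original file): early
 * boot code could use map_kernel_page() to wire a single MMIO page
 * into the kernel address space before ioremap() is available.  The
 * physical and virtual addresses below are hypothetical.
 */
#if 0
static void __init example_map_early_uart(void)
{
	phys_addr_t uart_pa = 0xf8000000;	/* hypothetical UART base */
	unsigned long uart_va = 0xff000000;	/* hypothetical scratch VA */

	/* Map one uncached, guarded page for device registers. */
	if (map_kernel_page(uart_va, uart_pa, PAGE_KERNEL_NCG))
		pr_warn("example: early UART mapping failed\n");
}
#endif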

/*
 * Map in a chunk of physical memory starting at offset bytes into lowmem.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}
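
/*
 * Worked example (hypothetical numbers): with PAGE_OFFSET at
 * 0xc0000000, memstart_addr at 0 and offset 0x100000, the loop above
 * maps virtual 0xc0100000 onto physical 0x100000 one PAGE_SIZE step
 * at a time until s reaches top, marking kernel text pages with
 * PAGE_KERNEL_TEXT and everything else with PAGE_KERNEL.
 */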

void __init mapin_ram(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;
		/*
		 * mmu_mapin_ram() covers what it can with block mappings
		 * (e.g. BATs on book3s/32) and returns the address past
		 * the covered range; page tables map only the remainder.
		 */
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
	pgd_t	*pgd;
	pud_t	*pud;
	pmd_t	*pmd;
	pte_t	*pte;
	int	retval = 0;

	pgd = pgd_offset(mm, addr & PAGE_MASK);
	if (pgd) {
		pud = pud_offset(pgd, addr & PAGE_MASK);
		if (pud && pud_present(*pud)) {
			pmd = pmd_offset(pud, addr & PAGE_MASK);
			if (pmd_present(*pmd)) {
				pte = pte_offset_map(pmd, addr & PAGE_MASK);
				if (pte) {
					retval = 1;
					*ptep = pte;
					if (pmdp)
						*pmdp = pmd;
					/* XXX caller needs to do pte_unmap, yuck */
				}
			}
		}
	}
	return retval;
}
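
/*
 * Usage sketch (illustrative, not part of the original file): as the
 * XXX note above says, a caller owns the mapping taken by
 * pte_offset_map() and must pte_unmap() it.  The helper below is
 * hypothetical.
 */
#if 0
static int example_read_pte(unsigned long addr, pte_t *val)
{
	pte_t *pte;

	if (!get_pteptr(&init_mm, addr, &pte, NULL))
		return -EINVAL;
	*val = *pte;
	pte_unmap(pte);		/* pairs with pte_offset_map() above */
	return 0;
}
#endif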

static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	pmd_t *kpmd;
	unsigned long address;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	if (v_block_mapped(address))
		return 0;
	if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
		return -EINVAL;
	__set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
	pte_unmap(kpte);

	return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int i, err = 0;
	unsigned long flags;
	struct page *start = page;

	local_irq_save(flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr_noflush(page, prot);
		if (err)
			break;
	}
	wmb();
	local_irq_restore(flags);
	flush_tlb_kernel_range((unsigned long)page_address(start),
			       (unsigned long)page_address(page));
	return err;
}
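
/*
 * Usage sketch (illustrative, not part of the original file): as a
 * debug aid, a page could be write-protected to catch stray writes and
 * later restored.  Block-mapped (BAT) ranges are left untouched, per
 * the v_block_mapped() check in __change_page_attr_noflush().
 */
#if 0
static void example_write_protect(struct page *pg)
{
	if (change_page_attr(pg, 1, PAGE_KERNEL_RO))
		pr_warn("example: write-protect failed\n");
	/* ... stray writes now fault ... */
	if (change_page_attr(pg, 1, PAGE_KERNEL))
		pr_warn("example: restore failed\n");
}
#endif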

void mark_initmem_nx(void)
{
	struct page *page = virt_to_page(_sinittext);
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_stext + 1))
		mmu_mark_initmem_nx();
	else
		change_page_attr(page, numpages, PAGE_KERNEL);
}
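
/*
 * Worked example (hypothetical addresses): with _sinittext at
 * 0xc0700000 and _einittext at 0xc0740500, PFN_DOWN() yields 0xc0700
 * and PFN_UP() rounds up to 0xc0741, so numpages is 0x41 and the
 * partially used trailing page is covered as well.
 */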

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	struct page *page;
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_sinittext)) {
		mmu_mark_rodata_ro();
		return;
	}

	page = virt_to_page(_stext);
	numpages = PFN_UP((unsigned long)_etext) -
		   PFN_DOWN((unsigned long)_stext);

	change_page_attr(page, numpages, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	page = virt_to_page(__start_rodata);
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)__start_rodata);

	change_page_attr(page, numpages, PAGE_KERNEL_RO);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */