linux/arch/powerpc/mm/pgtable_32.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>

unsigned long ioremap_bot;
EXPORT_SYMBOL(ioremap_bot);     /* aka VMALLOC_END */

extern char etext[], _stext[], _sinittext[], _einittext[];

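/*
 * The ioremap() variants below differ only in the page protection they
 * select (pgprot_noncached(), pgprot_noncached_wc(), pgprot_cached_wthru()
 * or pgprot_cached()); the actual mapping work is done by __ioremap_caller().
 * Illustrative driver-style use (names are hypothetical):
 *
 *        regs = ioremap(res->start, resource_size(res));
 */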
void __iomem *
ioremap(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached(PAGE_KERNEL);

        return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *
ioremap_wc(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_noncached_wc(PAGE_KERNEL);

        return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

void __iomem *
ioremap_wt(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached_wthru(PAGE_KERNEL);

        return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *
ioremap_coherent(phys_addr_t addr, unsigned long size)
{
        pgprot_t prot = pgprot_cached(PAGE_KERNEL);

        return __ioremap_caller(addr, size, prot, __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_coherent);

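/*
 * Variant that takes raw PTE flags from the caller.  The flags are
 * sanitised below: writable mappings are marked dirty up front, and the
 * exec/user bits are stripped so they cannot leak into a kernel mapping.
 */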
void __iomem *
ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags)
{
        pte_t pte = __pte(flags);

        /* writeable implies dirty for kernel addresses */
        if (pte_write(pte))
                pte = pte_mkdirty(pte);

        /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
        pte = pte_exprotect(pte);
        pte = pte_mkprivileged(pte);

        return __ioremap_caller(addr, size, pte_pgprot(pte), __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

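/* Raw variant: the caller-supplied flags are used unmodified. */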
void __iomem *
__ioremap(phys_addr_t addr, unsigned long size, unsigned long flags)
{
        return __ioremap_caller(addr, size, __pgprot(flags), __builtin_return_address(0));
}

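/*
 * Common implementation behind all the ioremap variants: pick a virtual
 * address (from vmalloc space once slab is up, otherwise descending from
 * ioremap_bot), then map the range page by page with map_kernel_page().
 */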
void __iomem *
__ioremap_caller(phys_addr_t addr, unsigned long size, pgprot_t prot, void *caller)
{
        unsigned long v, i;
        phys_addr_t p;
        int err;

        /*
         * Choose an address to map it to.
         * Once the vmalloc system is running, we use it.
         * Before then, we use space going down from IOREMAP_TOP
         * (ioremap_bot records where we're up to).
         */
        p = addr & PAGE_MASK;
        size = PAGE_ALIGN(addr + size) - p;

        /*
         * If the address lies within the first 16 MB, assume it's in ISA
         * memory space
         */
        if (p < 16*1024*1024)
                p += _ISA_MEM_BASE;

#ifndef CONFIG_CRASH_DUMP
        /*
         * Don't allow anybody to remap normal RAM that we're using.
         * mem_init() sets high_memory so only do the check after that.
         */
        if (slab_is_available() && p <= virt_to_phys(high_memory - 1) &&
            page_is_ram(__phys_to_pfn(p))) {
                printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
                       (unsigned long long)p, __builtin_return_address(0));
                return NULL;
        }
#endif

        if (size == 0)
                return NULL;

        /*
         * Is it already mapped?  Perhaps overlapped by a previous
         * mapping.
         */
        v = p_block_mapped(p);
        if (v)
                goto out;

        if (slab_is_available()) {
                struct vm_struct *area;
                area = get_vm_area_caller(size, VM_IOREMAP, caller);
                if (area == 0)
                        return NULL;
                area->phys_addr = p;
                v = (unsigned long) area->addr;
        } else {
                v = (ioremap_bot -= size);
        }

        /*
         * Should check if it is a candidate for a BAT mapping
         */

        err = 0;
        for (i = 0; i < size && err == 0; i += PAGE_SIZE)
                err = map_kernel_page(v + i, p + i, prot);
        if (err) {
                if (slab_is_available())
                        vunmap((void *)v);
                return NULL;
        }

out:
        return (void __iomem *) (v + ((unsigned long)addr & ~PAGE_MASK));
}
EXPORT_SYMBOL(__ioremap);

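/* Undo a mapping set up by one of the ioremap variants. */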
void iounmap(volatile void __iomem *addr)
{
        /*
         * If mapped by BATs then there is nothing to do.
         * Calling vfree() generates a benign warning.
         */
        if (v_block_mapped((unsigned long)addr))
                return;

        if (addr > high_memory && (unsigned long) addr < ioremap_bot)
                vunmap((void *) (PAGE_MASK & (unsigned long)addr));
}
EXPORT_SYMBOL(iounmap);

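/*
 * Boot-time page table allocation: used before the slab allocator is up,
 * so the memory comes straight from memblock.
 */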
static void __init *early_alloc_pgtable(unsigned long size)
{
        void *ptr = memblock_alloc(size, size);

        if (!ptr)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, size, size);

        return ptr;
}

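/* Early (pre-slab) counterpart of pte_alloc_kernel() for init_mm. */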
static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
        if (pmd_none(*pmdp)) {
                pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

                pmd_populate_kernel(&init_mm, pmdp, ptep);
        }
        return pte_offset_kernel(pmdp, va);
}

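/*
 * Install a single kernel page mapping of physical address 'pa' at virtual
 * address 'va' with protection 'prot'.  Works both before and after the
 * slab allocator is available.
 */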
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
        pmd_t *pd;
        pte_t *pg;
        int err = -ENOMEM;

        /* Use upper 10 bits of VA to index the first level map */
        pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
        /* Use middle 10 bits of VA to index the second-level map */
        if (likely(slab_is_available()))
                pg = pte_alloc_kernel(pd, va);
        else
                pg = early_pte_alloc_kernel(pd, va);
        if (pg != 0) {
                err = 0;
                /* The PTE should never be already set nor present in the
                 * hash table
                 */
                BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
                set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
        }
        smp_wmb();
        return err;
}

/*
 * Map in a chunk of physical memory starting at byte offset 'offset' from
 * the start of RAM, up to (but not including) 'top'.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
        unsigned long v, s;
        phys_addr_t p;
        int ktext;

        s = offset;
        v = PAGE_OFFSET + s;
        p = memstart_addr + s;
        for (; s < top; s += PAGE_SIZE) {
                ktext = ((char *)v >= _stext && (char *)v < etext) ||
                        ((char *)v >= _sinittext && (char *)v < _einittext);
                map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
                if (ktext)
                        hash_preload(&init_mm, v, false, 0x300);
#endif
                v += PAGE_SIZE;
                p += PAGE_SIZE;
        }
}

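/*
 * Map all of lowmem into the kernel linear mapping.  The MMU specific
 * code gets first shot at covering each region with block mappings
 * (e.g. BATs); whatever it leaves over is mapped page by page.
 */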
void __init mapin_ram(void)
{
        struct memblock_region *reg;

        for_each_memblock(memory, reg) {
                phys_addr_t base = reg->base;
                phys_addr_t top = min(base + reg->size, total_lowmem);

                if (base >= top)
                        continue;
                base = mmu_mapin_ram(base, top);
                if (IS_ENABLED(CONFIG_BDI_SWITCH))
                        __mapin_ram_chunk(reg->base, top);
                else
                        __mapin_ram_chunk(base, top);
        }
}

/* Scan the real Linux page tables and return a PTE pointer for
 * a virtual address in a context.
 * Returns true (1) if PTE was found, zero otherwise.  The pointer to
 * the PTE pointer is unmodified if PTE is not found.
 */
static int
get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
{
        pgd_t   *pgd;
        pud_t   *pud;
        pmd_t   *pmd;
        pte_t   *pte;
        int     retval = 0;

        pgd = pgd_offset(mm, addr & PAGE_MASK);
        if (pgd) {
                pud = pud_offset(pgd, addr & PAGE_MASK);
                if (pud && pud_present(*pud)) {
                        pmd = pmd_offset(pud, addr & PAGE_MASK);
                        if (pmd_present(*pmd)) {
                                pte = pte_offset_map(pmd, addr & PAGE_MASK);
                                if (pte) {
                                        retval = 1;
                                        *ptep = pte;
                                        if (pmdp)
                                                *pmdp = pmd;
                                        /* XXX caller needs to do pte_unmap, yuck */
                                }
                        }
                }
        }
        return(retval);
}

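/*
 * Update the protection of a single page of the linear mapping in place.
 * The caller is responsible for any TLB flushing.
 */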
static int __change_page_attr_noflush(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        pmd_t *kpmd;
        unsigned long address;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        if (v_block_mapped(address))
                return 0;
        if (!get_pteptr(&init_mm, address, &kpte, &kpmd))
                return -EINVAL;
        __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0);
        pte_unmap(kpte);

        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * THIS DOES NOTHING WITH BAT MAPPINGS, DEBUG USE ONLY
 */
static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int i, err = 0;
        unsigned long flags;
        struct page *start = page;

        local_irq_save(flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr_noflush(page, prot);
                if (err)
                        break;
        }
        wmb();
        local_irq_restore(flags);
        flush_tlb_kernel_range((unsigned long)page_address(start),
                               (unsigned long)page_address(page));
        return err;
}

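/*
 * Make the init text non-executable once it is no longer needed.  If the
 * kernel text is covered by a block mapping this is delegated to the MMU
 * specific code, otherwise the init text pages are remapped as PAGE_KERNEL.
 */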
void mark_initmem_nx(void)
{
        struct page *page = virt_to_page(_sinittext);
        unsigned long numpages = PFN_UP((unsigned long)_einittext) -
                                 PFN_DOWN((unsigned long)_sinittext);

        if (v_block_mapped((unsigned long)_stext + 1))
                mmu_mark_initmem_nx();
        else
                change_page_attr(page, numpages, PAGE_KERNEL);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
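/*
 * Write-protect the kernel text and read-only data.  With a block mapped
 * kernel this is handled by the MMU specific code; otherwise the linear
 * mapping PTEs are updated via change_page_attr().
 */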
void mark_rodata_ro(void)
{
        struct page *page;
        unsigned long numpages;

        if (v_block_mapped((unsigned long)_sinittext)) {
                mmu_mark_rodata_ro();
                return;
        }

        page = virt_to_page(_stext);
        numpages = PFN_UP((unsigned long)_etext) -
                   PFN_DOWN((unsigned long)_stext);

        change_page_attr(page, numpages, PAGE_KERNEL_ROX);
        /*
         * mark .rodata as read only. Use __init_begin rather than __end_rodata
         * to cover NOTES and EXCEPTION_TABLE.
         */
        page = virt_to_page(__start_rodata);
        numpages = PFN_UP((unsigned long)__init_begin) -
                   PFN_DOWN((unsigned long)__start_rodata);

        change_page_attr(page, numpages, PAGE_KERNEL_RO);

        // mark_initmem_nx() should have already run by now
        ptdump_check_wx();
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
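/*
 * For CONFIG_DEBUG_PAGEALLOC: map or unmap pages of the linear mapping so
 * that stray accesses to freed pages fault.  Highmem pages have no linear
 * mapping entry to modify, hence the early return.
 */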
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
}
#endif /* CONFIG_DEBUG_PAGEALLOC */