linux/arch/powerpc/mm/nohash/8xx.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
#include <asm/inst.h>
#include <asm/pgalloc.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

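/* Set when "noltlbs" is passed on the kernel command line */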
extern int __map_without_ltlbs;

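/* Amount of RAM (in bytes) block-mapped from physical address 0 */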
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, return 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA if it is mapped with LTLBs or fixmap.
 * Otherwise, return 0.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

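/*
 * Early (memblock-time) allocator for the PTE table backing an 8M huge
 * page mapping. The two consecutive PGD entries covering the 8M area are
 * both populated to point at the same PTE table. Returns NULL if the
 * memblock allocation fails.
 */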
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

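/*
 * Map a 512k or 8M kernel huge page at boot time. When @new is true the
 * page table is allocated (only valid before slab is available); otherwise
 * the existing PTE is looked up and the mapping is updated in place.
 * Returns 0 on success, a negative errno otherwise.
 */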
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/* The PTE should never be already present */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

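/*
 * Map the IMMR area (on-chip registers) with a single non-cached guarded
 * 512k page. Safe to call several times: only the first call maps it.
 */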
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

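/*
 * Map (or remap when @new is false) the physical range @offset..@top at
 * PAGE_OFFSET + @offset: 512k pages up to the first 8M boundary, then 8M
 * pages, then 512k pages for the remainder. Remapping flushes the TLB
 * entries covering the range.
 */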
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}

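/*
 * Map kernel RAM with block mappings (LTLBs), keeping kernel text and
 * init text boundaries aligned as required by STRICT_KERNEL_RWX and
 * DEBUG_PAGEALLOC. Returns the amount of RAM that has been block-mapped.
 */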
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

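/*
 * Called once init memory is freed: remap the former init text with a
 * non-executable protection and, when CONFIG_PIN_TLB_TEXT is enabled,
 * refresh the pinned TLB entries.
 */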
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
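/*
 * Remap kernel text and rodata read-only executable (ROX), and re-pin
 * the TLB entries read-only when CONFIG_PIN_TLB_DATA is set.
 */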
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

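/*
 * Set the limit for early memblock allocations: they must fall within
 * the initial mapping, which covers at most 32MB on the 8xx.
 */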
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping
	 * physical address 0 on these processors.
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

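/*
 * Invalidate the whole instruction cache by writing IDC_INVALL to the
 * IC_CST special purpose register, with isync barriers around the store.
 */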
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

#ifdef CONFIG_PPC_KUEP
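/*
 * Enable Kernel Userspace Execution Prevention through the instruction
 * access protection groups, unless it was disabled on the command line.
 */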
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
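/*
 * Enable Kernel Userspace Access Protection through the data access
 * protection groups. On the 8xx it cannot be disabled once compiled in.
 */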
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif