linux/arch/powerpc/mm/nohash/8xx.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *  -- christophe
 *
 *  Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

#include <mm/mmu_decl.h>

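/* Size, in bytes, of the IMMR area covered by the fixmap. */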
#define IMMR_SIZE (FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

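/*
 * Top of the physical RAM region covered by the early block mapping,
 * in bytes. Zero until mmu_mapin_ram() has run.
 */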
static unsigned long block_mapped_ram;

/*
 * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
 * Otherwise, return 0.
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (__map_without_ltlbs)
		return 0;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}

/*
 * Return VA for a given PA mapped with LTLBs or fixmap.
 * Return 0 if not mapped.
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (__map_without_ltlbs)
		return 0;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}

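/*
 * Allocate, at early boot, the huge PTE backing an 8M kernel page.
 * An 8M page spans two consecutive PGD entries (4M each), so both are
 * populated with the same hugepd, pointing at a single huge PTE.
 */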
static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)
{
	if (hpd_val(*pmdp) == 0) {
		pte_t *ptep = memblock_alloc(sizeof(pte_basic_t), SZ_4K);

		if (!ptep)
			return NULL;

		hugepd_populate_kernel((hugepd_t *)pmdp, ptep, PAGE_SHIFT_8M);
		hugepd_populate_kernel((hugepd_t *)pmdp + 1, ptep, PAGE_SHIFT_8M);
	}
	return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
}

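/*
 * Map a single 512k or 8M kernel huge page at early boot.
 * 'new' selects between creating a mapping (only valid before slab is
 * available, page tables come from memblock) and updating the
 * protection of an existing one.
 */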
static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,
					     pgprot_t prot, int psize, bool new)
{
	pmd_t *pmdp = pmd_off_k(va);
	pte_t *ptep;

	if (WARN_ON(psize != MMU_PAGE_512K && psize != MMU_PAGE_8M))
		return -EINVAL;

	if (new) {
		if (WARN_ON(slab_is_available()))
			return -EINVAL;

		if (psize == MMU_PAGE_512K)
			ptep = early_pte_alloc_kernel(pmdp, va);
		else
			ptep = early_hugepd_alloc_kernel((hugepd_t *)pmdp, va);
	} else {
		if (psize == MMU_PAGE_512K)
			ptep = pte_offset_kernel(pmdp, va);
		else
			ptep = hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);
	}

	if (WARN_ON(!ptep))
		return -ENOMEM;

	/*
	 * A new mapping must never find the PTE already present,
	 * unless we are only clearing it (empty prot).
	 */
	if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot)))
		return -EINVAL;

	set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)));

	return 0;
}

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
}

static bool immr_is_mapped __initdata;

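/*
 * Map the IMMR area (internal memory-mapped registers) with a single
 * non-cacheable, guarded 512k huge page. Safe to call several times:
 * only the first call performs the mapping.
 */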
void __init mmu_mapin_immr(void)
{
	if (immr_is_mapped)
		return;

	immr_is_mapped = true;

	__early_map_kernel_hugepage(VIRT_IMMR_BASE, PHYS_IMMR_BASE,
				    PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
}

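/*
 * Map (or remap, when 'new' is false) physical [offset, top) at
 * PAGE_OFFSET + offset, in three steps: 512k pages up to the first 8M
 * boundary, 8M pages for the bulk, then 512k pages for the tail.
 * Both bounds must be 512k aligned.
 */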
static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
				pgprot_t prot, bool new)
{
	unsigned long v = PAGE_OFFSET + offset;
	unsigned long p = offset;

	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));

	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);

	/* Flush stale translations for the virtual range we just remapped.
	 * Note that v already includes PAGE_OFFSET, so the range starts at
	 * PAGE_OFFSET + offset, not PAGE_OFFSET + v.
	 */
	if (!new)
		flush_tlb_kernel_range(PAGE_OFFSET + offset, PAGE_OFFSET + top);
}

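/*
 * Block-map the beginning of RAM: kernel text (including init text) is
 * mapped PAGE_KERNEL_TEXT, the rest PAGE_KERNEL. With DEBUG_PAGEALLOC
 * only the text is block-mapped. Returns the top of the block-mapped
 * region, or 0 when mapping without LTLBs.
 */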
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	bool strict_boundary = strict_kernel_rwx_enabled() || debug_pagealloc_enabled();
	unsigned long boundary = strict_boundary ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	WARN_ON(top < einittext8);

	mmu_mapin_immr();

	if (__map_without_ltlbs)
		return 0;

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, true);
	if (debug_pagealloc_enabled()) {
		top = boundary;
	} else {
		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL_TEXT, true);
		mmu_mapin_ram_chunk(einittext8, top, PAGE_KERNEL, true);
	}

	if (top > SZ_32M)
		memblock_set_current_limit(top);

	block_mapped_ram = top;

	return top;
}

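/*
 * Once init memory is freed, remap the init text area non-executable
 * (PAGE_KERNEL), keeping PAGE_KERNEL_TEXT up to the executable
 * boundary, and re-pin the text TLB entries if text pinning is enabled.
 */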
void mmu_mark_initmem_nx(void)
{
	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
	unsigned long sinittext = __pa(_sinittext);
	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);

	mmu_mapin_ram_chunk(0, boundary, PAGE_KERNEL_TEXT, false);
	mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);

	if (IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_pin_tlb(block_mapped_ram, false);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
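/*
 * Remap everything below _sinittext read-only. Block mappings work at
 * huge page granularity, so text and rodata end up sharing the same
 * ROX mapping.
 */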
void mmu_mark_rodata_ro(void)
{
	unsigned long sinittext = __pa(_sinittext);

	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
		mmu_pin_tlb(block_mapped_ram, true);
}
#endif

void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on these processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}

/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task's page directory into
 * the MMU base register, and set the ASID compare register with the
 * new "context".
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}

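/*
 * Invalidate the whole instruction cache by writing IDC_INVALL to the
 * IC_CST control register, with isync barriers around the update.
 */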
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}

#ifdef CONFIG_PPC_KUEP
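/*
 * Kernel Userspace Execution Prevention: program the instruction MMU
 * access protection groups so the kernel cannot execute from user
 * pages.
 */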
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif

#ifdef CONFIG_PPC_KUAP
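/*
 * Kernel Userspace Access Protection: program the data MMU access
 * protection groups so stray kernel accesses to user memory fault
 * unless explicitly allowed.
 */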
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif