linux/arch/powerpc/include/asm/nohash/32/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>                    /* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE  PTE_SHIFT
#define PMD_INDEX_SIZE  0
#define PUD_INDEX_SIZE  0
#define PGD_INDEX_SIZE  (32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE  0
#define PUD_TABLE_SIZE  0
#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif  /* __ASSEMBLY__ */

#define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32 bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT     (PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
#define PGDIR_MASK      (~(PGDIR_SIZE-1))
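/*
 * Worked example (a sketch; the exact values depend on the sub-arch
 * config): with CONFIG_PPC_4K_PAGES and 32-bit PTEs, PTE_SHIFT is 10,
 * so PTRS_PER_PTE = 1024, PGDIR_SHIFT = 12 + 10 = 22, PGDIR_SIZE = 4MB
 * and PGD_INDEX_SIZE = 10, i.e. a 4KB pgdir of 1024 entries.  With
 * 64-bit PTEs (CONFIG_PTE_64BIT), PTE_SHIFT drops to 9 (512 8-byte
 * PTEs per page), so PGDIR_SHIFT = 21, PGD_INDEX_SIZE = 11 and
 * PGD_TABLE_SIZE = sizeof(pgd_t) << 11 = 8KB: the 2048-entry/512-PTE
 * layout described above.
 */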

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS         0

#define USER_PTRS_PER_PGD       (TASK_SIZE / PGDIR_SIZE)

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
                (unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP     PKMAP_BASE
#else
#define IOREMAP_TOP     FIXADDR_START
#endif

/* PPC32 shares vmalloc area with ioremap */
#define IOREMAP_START   VMALLOC_START
#define IOREMAP_END     VMALLOC_END

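/*
 * Sketch of the resulting layout with CONFIG_HIGHMEM, top down
 * (illustrative only; the actual addresses are platform dependent):
 *
 *      FIXADDR_START .. FIXADDR_TOP    fixmap
 *      PKMAP_BASE .. FIXADDR_START     pkmap
 *      ioremap_bot .. IOREMAP_TOP      early ioremaps, growing down
 *      VMALLOC_START .. VMALLOC_END    vmalloc + ioremap, growing up
 */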
/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() (which start growing
 * down from IOREMAP_TOP) and the VM area allocations (which grow upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings set up in early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END     ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END     ioremap_bot
#endif
 123
 124/*
 125 * Bits in a linux-style PTE.  These match the bits in the
 126 * (hardware-defined) PowerPC PTE as closely as possible.
 127 */
 128
 129#if defined(CONFIG_40x)
 130#include <asm/nohash/32/pte-40x.h>
 131#elif defined(CONFIG_44x)
 132#include <asm/nohash/32/pte-44x.h>
 133#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
 134#include <asm/nohash/pte-book3e.h>
 135#elif defined(CONFIG_FSL_BOOKE)
 136#include <asm/nohash/32/pte-fsl-booke.h>
 137#elif defined(CONFIG_PPC_8xx)
 138#include <asm/nohash/32/pte-8xx.h>
 139#endif
 140
/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that use a different location simply pre-define the value,
 * so we don't override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT   (PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK    (~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
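/*
 * A minimal sketch of how these macros combine, assuming the default
 * PTE_RPN_SHIFT == PAGE_SHIFT; the kernel's real pfn helpers live in
 * the shared nohash pgtable code and may differ in detail.
 */
#if 0	/* illustrative only, not built */
static inline unsigned long example_pte_pfn(pte_t pte)
{
	/* Strip the flag bits, then shift the RPN down to get the PFN. */
	return (unsigned long)((pte_val(pte) & PTE_RPN_MASK) >> PTE_RPN_SHIFT);
}
#endif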

/*
 * _PAGE_CHG_MASK is the set of bits to be preserved across pgprot
 * changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
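/*
 * A sketch of the intended use, assuming a pte_modify()-style helper
 * (the real one lives elsewhere in the nohash pgtable headers): the
 * PFN plus the dirty/accessed/special tracking bits survive, while
 * everything else is replaced by the new protection bits.
 */
#if 0	/* illustrative only, not built */
static inline pte_t example_pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
#endif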

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
        do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)        (pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special. For 16k pages, we have
 * 4 identical entries. For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For other page sizes, we have a single entry in the table.
 */
#ifdef CONFIG_PPC_8xx
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
        if (!huge)
                return PAGE_SIZE / SZ_4K;
        else if (hugepd_ok(*((hugepd_t *)pmd)))
                return 1;
        else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
                return SZ_16K / SZ_4K;
        else
                return SZ_512K / SZ_4K;
}

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t *entry = (pte_basic_t *)p;
        pte_basic_t old = pte_val(*p);
        pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
        int num, i;
        pmd_t *pmd = pmd_off(mm, addr);

        num = number_of_cells_per_pte(pmd, new, huge);

        for (i = 0; i < num; i++, entry++, new += SZ_4K)
                *entry = new;

        return old;
}

#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
        pte_basic_t val = READ_ONCE(ptep->pte);
        pte_t pte = {val, val, val, val};

        return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */
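/*
 * Worked example (8xx with CONFIG_PPC_16K_PAGES, !huge):
 * number_of_cells_per_pte() returns PAGE_SIZE / SZ_4K = 4, so
 * pte_update() refreshes four consecutive 4k cells, stepping the value
 * by SZ_4K each iteration, and ptep_get() rebuilds a pte_t by
 * replicating the first cell.  For 512k pages on a 4k-page kernel it
 * is 128 cells; for a hugepd-backed entry, a single one.
 */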

#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
                                     unsigned long clr, unsigned long set, int huge)
{
        pte_basic_t old = pte_val(*p);
        pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

        *p = __pte(new);

#ifdef CONFIG_44x
        if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
                icache_44x_need_flush = 1;
#endif
        return old;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
{
        unsigned long old;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
        __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
#endif

#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long set = pte_val(entry) &
                            (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
        int huge = psize > mmu_virtual_psize ? 1 : 0;

        pte_update(vma->vm_mm, address, ptep, 0, set, huge);

        flush_tlb_page(vma, address);
}
#endif

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_pfn(pmd)            (pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)     \
        ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_pfn(pmd)            (__pa(pmd_val(pmd)) >> PAGE_SHIFT)
#endif

#define pmd_page(pmd)           pfn_to_page(pmd_pfn(pmd))
/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)               ((entry).val & 0x1f)
#define __swp_offset(entry)             ((entry).val >> 5)
#define __swp_entry(type, offset)       ((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)           ((pte_t) { (x).val << 3 })
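/*
 * Worked example: __swp_entry(3, 0x10) packs the 5-bit type into the
 * low bits and the offset above it, giving val = 3 | (0x10 << 5) =
 * 0x203; __swp_entry_to_pte() then shifts that left by 3, so the PTE
 * value is 0x1018 and the swap bits stay clear of the low-order
 * hardware bits (notably _PAGE_PRESENT, per the note above).  Decoding
 * reverses the steps: type = 0x203 & 0x1f = 3, offset = 0x203 >> 5 =
 * 0x10.  The 5-bit type field allows at most 32 swap types.
 */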

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */