/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#define __ARCH_USE_5LEVEL_HACK
#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */
#include <asm/asm-405.h>

extern unsigned long ioremap_bot;

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 11 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
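
/*
 * Worked example (illustrative, assuming 4k pages): with 32-bit PTEs,
 * PTE_SHIFT is 10, so PGDIR_SHIFT = 12 + 10 = 22 and each of the 1024
 * pgdir entries maps a 4MB PGDIR_SIZE region.  With CONFIG_PTE_64BIT,
 * PTE_SHIFT drops to 9 (512 eight-byte PTEs per page), so PGDIR_SHIFT
 * = 21, PGD_INDEX_SIZE = 11, and the pgdir grows to 2048 * 4 bytes =
 * 8KB, matching the layout described above.
 */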

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out kernel
 * virtual space that goes below PKMAP and FIXMAP
 */
#ifdef CONFIG_HIGHMEM
#define KVIRT_TOP	PKMAP_BASE
#else
#define KVIRT_TOP	(0xfe000000UL)	/* for now, could be FIXMAP_BASE ? */
#endif

/*
 * ioremap_bot starts at IOREMAP_TOP. Early ioremaps move down from there,
 * until mem_init() at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_NOT_COHERENT_CACHE
#define IOREMAP_TOP	((KVIRT_TOP - CONFIG_CONSISTENT_SIZE) & PAGE_MASK)
#else
#define IOREMAP_TOP	KVIRT_TOP
#endif
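
/*
 * For example (illustrative values only): with KVIRT_TOP at 0xfe000000
 * and a 16MB CONFIG_CONSISTENT_SIZE, a non-coherent platform gets
 * IOREMAP_TOP = 0xfd000000; cache-coherent platforms keep the full
 * range up to KVIRT_TOP.
 */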

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((_ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif
#define VMALLOC_END	ioremap_bot
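
/*
 * Worked example (illustrative values only): with 256MB of lowmem,
 * high_memory is 0xd0000000, so VMALLOC_START = (0xd0000000 +
 * 0x1000000) & ~0xffffff = 0xd1000000, leaving the 16MB guard hole
 * described above.
 */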

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use PAGE_SHIFT
 * here (ie, the PFN is naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#endif
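
/*
 * For example, with the default PTE_RPN_SHIFT of 12 (4k pages) this
 * evaluates to 0xfffff000 for 32-bit PTEs; with CONFIG_PTE_64BIT the
 * ULL arithmetic extends it to 0xfffffffffffff000 so the mask also
 * covers the high physical address bits of a 64-bit PTE.
 */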

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(ptep, ~0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef CONFIG_PTE_64BIT
static inline unsigned long pte_update(pte_t *p,
				       unsigned long clr,
				       unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long old, tmp;

	__asm__ __volatile__("\
1:	lwarx	%0,0,%3\n\
	andc	%1,%0,%4\n\
	or	%1,%1,%5\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%3\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long old = pte_val(*p);
	unsigned long new = (old & ~clr) | set;

#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	p->pte = p->pte1 = p->pte2 = p->pte3 = new;
#else
	*p = __pte(new);
#endif
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#else /* CONFIG_PTE_64BIT */
static inline unsigned long long pte_update(pte_t *p,
					    unsigned long clr,
					    unsigned long set)
{
#ifdef PTE_ATOMIC_UPDATES
	unsigned long long old;
	unsigned long tmp;

	__asm__ __volatile__("\
1:	lwarx	%L0,0,%4\n\
	lwzx	%0,0,%3\n\
	andc	%1,%L0,%5\n\
	or	%1,%1,%6\n"
	PPC405_ERR77(0,%3)
"	stwcx.	%1,0,%4\n\
	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
	: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
	: "cc" );
#else /* PTE_ATOMIC_UPDATES */
	unsigned long long old = pte_val(*p);
	*p = __pte((old & ~(unsigned long long)clr) | set);
#endif /* !PTE_ATOMIC_UPDATES */

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif /* CONFIG_PTE_64BIT */
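
/*
 * Usage sketch (illustrative only; assumes a platform where write
 * permission is the _PAGE_RW bit): clear and set bits in one update
 * and get the previous PTE value back:
 *
 *	unsigned long old = pte_update(ptep, _PAGE_RW, _PAGE_DIRTY);
 *	if (old & _PAGE_RW)
 *		; // the mapping was writable before the update
 *
 * which write-protects the page while marking it dirty.
 */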

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(ptep, _PAGE_ACCESSED, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm->context.id, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(ptep, ~0, 0));
}

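/*
 * Derive the clear/set masks from pte_wrprotect() itself rather than
 * hard-coding _PAGE_RW: running an all-ones PTE through pte_wrprotect()
 * reveals which bits it clears, and running an all-zeroes PTE through
 * it reveals which bits it sets.  This stays correct on platforms such
 * as 8xx, where write protection is done by setting a bit rather than
 * clearing one.
 */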
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(ptep, clr, set);
}

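/*
 * Same mask-derivation trick as ptep_set_wrprotect() above: pte_set
 * holds the bits the pte_mk*() helpers set in an all-zeroes PTE,
 * pte_clr the bits they leave standing in an all-ones PTE, so only the
 * dirty/accessed/write/exec changes requested in 'entry' reach the PTE.
 */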
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);

	pte_update(ptep, clr, set);

	flush_tlb_page(vma, address);
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page_vaddr(pmd)	\
	((unsigned long)__va(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* to find an entry in a page-table-directory */
#define pgd_index(address)	 ((address) >> PGDIR_SHIFT)
#define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))

/* Find an entry in the third-level page table.. */
#define pte_index(address)		\
	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, addr)	\
	(pmd_bad(*(dir)) ? NULL : (pte_t *)pmd_page_vaddr(*(dir)) + \
				  pte_index(addr))
#define pte_offset_map(dir, addr)		\
	((pte_t *)(kmap_atomic(pmd_page(*(dir))) + \
		   (pmd_page_vaddr(*(dir)) & ~PAGE_MASK)) + pte_index(addr))
#define pte_unmap(pte)		kunmap_atomic(pte)
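
/*
 * Lookup sketch (illustrative only; assumes a valid mapping, no *_none()
 * checks shown).  The pud and pmd levels are folded on 32-bit nohash,
 * so the intermediate steps are essentially free:
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	...examine or modify *pte...
 *	pte_unmap(pte);
 */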

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
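
/*
 * Resulting swap PTE layout (after the << 3 shift): bits 0-2 stay
 * clear, which keeps _PAGE_PRESENT off on all the sub-arch PTE layouts
 * included above; bits 3-7 hold the 5-bit swap type and the remaining
 * high bits hold the swap offset.
 */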

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */