linux/arch/powerpc/include/asm/nohash/64/pgtable.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/*
 * This file contains the functions and defines necessary to modify and use
 * the ppc64 non-hashed page table.
 */

#include <linux/sizes.h>

#include <asm/nohash/64/pgtable-4k.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>

/*
 * Size of EA range mapped by our pagetables.
 */
#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
                            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
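
/*
 * Worked example (assuming the 4K page layout from pgtable-4k.h, i.e.
 * PTE/PMD/PUD/PGD index sizes of 9/7/9/9 bits and a PAGE_SHIFT of 12):
 * PGTABLE_EADDR_SIZE = 9 + 7 + 9 + 9 + 12 = 46, so PGTABLE_RANGE is
 * 1UL << 46 = 64 TB of effective address space per page table.
 */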

#define PMD_CACHE_INDEX PMD_INDEX_SIZE
#define PUD_CACHE_INDEX PUD_INDEX_SIZE

/*
 * Define the address range of the kernel non-linear virtual area
 */
#define KERN_VIRT_START ASM_CONST(0x8000000000000000)
#define KERN_VIRT_SIZE  ASM_CONST(0x0000100000000000)

/*
 * The vmalloc space starts at the beginning of that region, and
 * occupies a quarter of it on Book3E
 * (we keep a quarter for the virtual memmap)
 */
#define VMALLOC_START   KERN_VIRT_START
#define VMALLOC_SIZE    (KERN_VIRT_SIZE >> 2)
#define VMALLOC_END     (VMALLOC_START + VMALLOC_SIZE)

/*
 * The second half of the kernel virtual space is used for IO mappings;
 * it is itself carved into the PIO region (ISA and PHB IO space) and
 * the ioremap space
 *
 *  ISA_IO_BASE = KERN_IO_START, 64K reserved area
 *  PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces
 * IOREMAP_BASE = ISA_IO_BASE + 2G to VMALLOC_START + PGTABLE_RANGE
 */
#define KERN_IO_START   (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1))
#define FULL_IO_SIZE    0x80000000ul
#define  ISA_IO_BASE    (KERN_IO_START)
#define  ISA_IO_END     (KERN_IO_START + 0x10000ul)
#define  PHB_IO_BASE    (ISA_IO_END)
#define  PHB_IO_END     (KERN_IO_START + FULL_IO_SIZE)
#define IOREMAP_BASE    (PHB_IO_END)
#define IOREMAP_START   (ioremap_bot)
#define IOREMAP_END     (KERN_VIRT_START + KERN_VIRT_SIZE - FIXADDR_SIZE)
#define FIXADDR_SIZE    SZ_32M
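
/*
 * Plugging in the constants above gives the concrete layout (derived
 * here for illustration, not defined elsewhere):
 *   KERN_IO_START = 0x8000080000000000 (start of the IO half)
 *   ISA_IO_END    = KERN_IO_START + 64K
 *   PHB_IO_END    = KERN_IO_START + 2G = IOREMAP_BASE
 *   IOREMAP_END   = 0x8000100000000000 - 32M
 */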


/*
 * Region IDs
 */
#define REGION_SHIFT            60UL
#define REGION_MASK             (0xfUL << REGION_SHIFT)
#define REGION_ID(ea)           (((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID       (REGION_ID(VMALLOC_START))
#define KERNEL_REGION_ID        (REGION_ID(PAGE_OFFSET))
#define USER_REGION_ID          (0UL)
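
/*
 * Example: REGION_ID() is just the top nibble of the effective address,
 * so VMALLOC_REGION_ID is 0x8 (VMALLOC_START = 0x8000000000000000) and,
 * assuming the usual ppc64 PAGE_OFFSET of 0xc000000000000000,
 * KERNEL_REGION_ID is 0xc.
 */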

/*
 * Defines the address of the vmemmap area, in its own region after
 * the vmalloc space on Book3E
 */
#define VMEMMAP_BASE            VMALLOC_END
#define VMEMMAP_END             KERN_IO_START
#define vmemmap                 ((struct page *)VMEMMAP_BASE)
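
/*
 * With a virtually mapped memmap, pfn_to_page()/page_to_pfn() reduce to
 * array indexing into this region, e.g. (a sketch of the generic
 * SPARSEMEM_VMEMMAP pattern, not a definition from this file):
 *
 *      struct page *p = vmemmap + pfn;     pfn == p - vmemmap;
 */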


/*
 * Include the PTE bits definitions
 */
#include <asm/nohash/pte-book3e.h>

#define _PAGE_SAO       0

#define PTE_RPN_MASK    (~((1UL << PTE_RPN_SHIFT) - 1))

/*
 * _PAGE_CHG_MASK is the mask of bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK  (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)
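
/*
 * A minimal sketch of the intended use, following the generic
 * pte_modify() pattern (illustrative, not defined in this file):
 *
 *      return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
 *
 * i.e. the PFN and the dirty/accessed/special tracking bits survive a
 * protection change; everything else comes from the new pgprot.
 */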

#define H_PAGE_4K_PFN 0

#ifndef __ASSEMBLY__
/* pte_clear moved to later in this file */

static inline pte_t pte_mkwrite(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return __pte(pte_val(pte) | _PAGE_EXEC);
}
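
/*
 * Illustrative composition only (the helper name below is hypothetical,
 * not part of the kernel API): the setters above are pure functions on
 * pte_t, so they chain naturally.
 */
static inline pte_t example_pte_mkwrite_dirty_young(pte_t pte)
{
        return pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
}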
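/*
 * A pmd/pud entry holds the kernel virtual address of the page table
 * below it, so pmd_bad()/pud_bad() below reject any entry that is not
 * a kernel address aligned to that table's size.
 */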
#define PMD_BAD_BITS            (PTE_TABLE_SIZE-1)
#define PUD_BAD_BITS            (PMD_TABLE_SIZE-1)

static inline void pmd_set(pmd_t *pmdp, unsigned long val)
{
        *pmdp = __pmd(val);
}

static inline void pmd_clear(pmd_t *pmdp)
{
        *pmdp = __pmd(0);
}

static inline pte_t pmd_pte(pmd_t pmd)
{
        return __pte(pmd_val(pmd));
}

#define pmd_none(pmd)           (!pmd_val(pmd))
#define pmd_bad(pmd)            (!is_kernel_addr(pmd_val(pmd)) \
                                 || (pmd_val(pmd) & PMD_BAD_BITS))
#define pmd_present(pmd)        (!pmd_none(pmd))
#define pmd_page_vaddr(pmd)     (pmd_val(pmd) & ~PMD_MASKED_BITS)
extern struct page *pmd_page(pmd_t pmd);

static inline void pud_set(pud_t *pudp, unsigned long val)
{
        *pudp = __pud(val);
}

static inline void pud_clear(pud_t *pudp)
{
        *pudp = __pud(0);
}

#define pud_none(pud)           (!pud_val(pud))
#define pud_bad(pud)            (!is_kernel_addr(pud_val(pud)) \
                                 || (pud_val(pud) & PUD_BAD_BITS))
#define pud_present(pud)        (pud_val(pud) != 0)

static inline pmd_t *pud_pgtable(pud_t pud)
{
        return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS);
}

extern struct page *pud_page(pud_t pud);

static inline pte_t pud_pte(pud_t pud)
{
        return __pte(pud_val(pud));
}

static inline pud_t pte_pud(pte_t pte)
{
        return __pud(pte_val(pte));
}
#define pud_write(pud)          pte_write(pud_pte(pud))
#define p4d_write(p4d)          pte_write(p4d_pte(p4d))

static inline void p4d_set(p4d_t *p4dp, unsigned long val)
{
        *p4dp = __p4d(val);
}

/* Atomic PTE updates */
static inline unsigned long pte_update(struct mm_struct *mm,
                                       unsigned long addr,
                                       pte_t *ptep, unsigned long clr,
                                       unsigned long set,
                                       int huge)
{
        unsigned long old = pte_val(*ptep);
        *ptep = __pte((old & ~clr) | set);

        /* huge pages use the old page table lock */
        if (!huge)
                assert_pte_locked(mm, addr);

        return old;
}
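
/*
 * Usage sketch (hypothetical helper, illustration only): "clr" bits are
 * removed and "set" bits added in a single store, and the previous
 * value is returned so callers can tell what changed.
 */
static inline int example_pte_test_and_mkdirty(struct mm_struct *mm,
                                               unsigned long addr, pte_t *ptep)
{
        /* Report whether the PTE was already dirty before setting the bit. */
        return (pte_update(mm, addr, ptep, 0, _PAGE_DIRTY, 0) & _PAGE_DIRTY) != 0;
}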

static inline int pte_young(pte_t pte)
{
        return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
                                              unsigned long addr, pte_t *ptep)
{
        unsigned long old;

        if (!pte_young(*ptep))
                return 0;
        old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
        return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define ptep_test_and_clear_young(__vma, __addr, __ptep)                   \
({                                                                         \
        int __r;                                                           \
        __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \
        __r;                                                               \
})

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep)
{
        if ((pte_val(*ptep) & _PAGE_RW) == 0)
                return;

        pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
                                           unsigned long addr, pte_t *ptep)
{
        if ((pte_val(*ptep) & _PAGE_RW) == 0)
                return;

        pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep)                \
({                                                                      \
        int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \
                                                  __ptep);              \
        __young;                                                        \
})

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pte_t *ptep)
{
        unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
        return __pte(old);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
                             pte_t *ptep)
{
        pte_update(mm, addr, ptep, ~0UL, 0, 0);
}


/* Set the dirty and/or accessed bits atomically in a linux PTE */
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
                                           pte_t *ptep, pte_t entry,
                                           unsigned long address,
                                           int psize)
{
        unsigned long bits = pte_val(entry) &
                (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

        unsigned long old = pte_val(*ptep);
        *ptep = __pte(old | bits);

        flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   ((pte_val(A) ^ pte_val(B)) == 0)

#define pte_ERROR(e) \
        pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
        pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pgd_ERROR(e) \
        pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* Encode and de-code a swap entry */
#define MAX_SWAPFILES_CHECK() do { \
        BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \
        } while (0)

#define SWP_TYPE_BITS 5
#define __swp_type(x)           (((x).val >> _PAGE_BIT_SWAP_TYPE) \
                                & ((1UL << SWP_TYPE_BITS) - 1))
#define __swp_offset(x)         ((x).val >> PTE_RPN_SHIFT)
#define __swp_entry(type, offset)       ((swp_entry_t) { \
                                        ((type) << _PAGE_BIT_SWAP_TYPE) \
                                        | ((offset) << PTE_RPN_SHIFT) })

#define __pte_to_swp_entry(pte)         ((swp_entry_t) { pte_val((pte)) })
#define __swp_entry_to_pte(x)           __pte((x).val)
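
/*
 * Round-trip sketch (assuming the encoding above, where the 5-bit swap
 * type sits at _PAGE_BIT_SWAP_TYPE and the offset occupies the PFN
 * field above PTE_RPN_SHIFT):
 *
 *      swp_entry_t e = __swp_entry(3, 0x1234);
 *      __swp_type(e)   == 3
 *      __swp_offset(e) == 0x1234
 */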

int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit vmemmap_create_mapping(unsigned long start,
                                            unsigned long page_size,
                                            unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
                                   unsigned long page_size);
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */