linux/arch/powerpc/include/asm/book3s/64/hash.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_H
#define _ASM_POWERPC_BOOK3S_64_HASH_H
#ifdef __KERNEL__

/*
 * Common bits between 4K and 64K pages in a linux-style PTE.
 * Additional bits may be defined in pgtable-hash64-*.h
 */
#define H_PTE_NONE_MASK		_PAGE_HPTEFLAGS

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/hash-64k.h>
#else
#include <asm/book3s/64/hash-4k.h>
#endif

/*
 * Size of EA range mapped by our pagetables.
 */
#define H_PGTABLE_EADDR_SIZE	(H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + \
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
#define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
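
/*
 * Worked example (illustrative only; the real index sizes live in
 * hash-64k.h/hash-4k.h and may differ between kernel versions): with
 * 64K pages, index sizes of 8 (PTE) + 10 (PMD) + 10 (PUD) + 8 (PGD)
 * on top of PAGE_SHIFT = 16 would give H_PGTABLE_EADDR_SIZE = 52,
 * i.e. page tables covering a 2^52-byte effective address range.
 */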

/*
 * We store the slot details in the second half of the page table.
 * Bumping the index by one doubles the PUD-level table allocation so
 * that hugetlb PTEs can be stored at the PUD level.
 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
#else
#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
#endif

/*
 * Define the address range of the kernel non-linear virtual area. In contrast
 * to the linear mapping, this is managed using the kernel page tables and then
 * inserted into the hash page table to actually take effect, similarly to user
 * mappings.
 */
#define H_KERN_VIRT_START ASM_CONST(0xD000000000000000)
/*
 * Allow virtual mapping of one context size:
 * 512TB for 64K page size
 * 64TB for 4K page size
 */
#define H_KERN_VIRT_SIZE (1UL << MAX_EA_BITS_PER_CONTEXT)
/*
 * 8TB IO mapping size
 */
#define H_KERN_IO_SIZE ASM_CONST(0x80000000000) /* 8T */

/*
 * The vmalloc space starts at the beginning of the kernel non-linear virtual
 * region and occupies 504T (64K) or 56T (4K).
 */
#define H_VMALLOC_START H_KERN_VIRT_START
#define H_VMALLOC_SIZE	(H_KERN_VIRT_SIZE - H_KERN_IO_SIZE)
#define H_VMALLOC_END	(H_VMALLOC_START + H_VMALLOC_SIZE)

#define H_KERN_IO_START H_VMALLOC_END
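
/*
 * Putting the pieces together for 64K pages (values follow from the
 * definitions above): the non-linear region spans 512T starting at
 * 0xD000000000000000; vmalloc takes the first 512T - 8T = 504T, and
 * the 8T IO region sits at the top, i.e. H_KERN_IO_START == H_VMALLOC_END.
 */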

/*
 * Region IDs
 */
#define REGION_SHIFT		60UL
#define REGION_MASK		(0xfUL << REGION_SHIFT)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)

#define VMALLOC_REGION_ID	(REGION_ID(H_VMALLOC_START))
#define KERNEL_REGION_ID	(REGION_ID(PAGE_OFFSET))
#define VMEMMAP_REGION_ID	(0xfUL) /* Server only */
#define USER_REGION_ID		(0UL)
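
/*
 * Illustrative examples (not kernel code): REGION_ID() simply takes the
 * top nibble of an effective address, so with the values above (and
 * PAGE_OFFSET = 0xC000000000000000 on book3s64):
 *
 *	REGION_ID(0xD000000000001000UL) == VMALLOC_REGION_ID	(0xd)
 *	REGION_ID(0xC000000000000000UL) == KERNEL_REGION_ID	(0xc)
 *	REGION_ID(0x0000700012345678UL) == USER_REGION_ID	(0x0)
 */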

/*
 * Defines the address of the vmemmap area, in its own region on
 * hash table CPUs.
 */
#define H_VMEMMAP_BASE		(VMEMMAP_REGION_ID << REGION_SHIFT)

#ifdef CONFIG_PPC_MM_SLICES
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
#endif /* CONFIG_PPC_MM_SLICES */

/* PTEIDX nibble */
#define _PTEIDX_SECONDARY	0x8
#define _PTEIDX_GROUP_IX	0x7
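
/*
 * Illustrative decode (sketch; hidx is a hypothetical variable, not a
 * kernel symbol): the nibble records where a Linux PTE's backing HPTE
 * lives in the hash table. Bit 3 selects the primary vs. secondary
 * hash bucket and bits 0-2 give the slot within the 8-entry group:
 *
 *	secondary = !!(hidx & _PTEIDX_SECONDARY);
 *	slot      = hidx & _PTEIDX_GROUP_IX;	// 0..7
 */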

#define H_PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
#define H_PUD_BAD_BITS		(PMD_TABLE_SIZE-1)

#ifndef __ASSEMBLY__
#define hash__pmd_bad(pmd)		(pmd_val(pmd) & H_PMD_BAD_BITS)
#define hash__pud_bad(pud)		(pud_val(pud) & H_PUD_BAD_BITS)
static inline int hash__pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) == 0);
}
#ifdef CONFIG_STRICT_KERNEL_RWX
extern void hash__mark_rodata_ro(void);
extern void hash__mark_initmem_nx(void);
#endif

extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
			    pte_t *ptep, unsigned long pte, int huge);
extern unsigned long htab_convert_pte_flags(unsigned long pteflags);

/* Atomic PTE updates */
static inline unsigned long hash__pte_update(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, unsigned long clr,
					     unsigned long set,
					     int huge)
{
	__be64 old_be, tmp_be;
	unsigned long old;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n\
	and.	%1,%0,%6\n\
	bne-	1b \n\
	andc	%1,%0,%4 \n\
	or	%1,%1,%7\n\
	stdcx.	%1,0,%3 \n\
	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(clr)), "m" (*ptep),
	  "r" (cpu_to_be64(H_PAGE_BUSY)), "r" (cpu_to_be64(set))
	: "cc" );
	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	old = be64_to_cpu(old_be);
	if (old & H_PAGE_HASHPTE)
		hpte_need_flush(mm, addr, ptep, old, huge);

	return old;
}
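
/*
 * Usage sketch (illustrative only; hash__example_wrprotect is a made-up
 * name, not a kernel helper): callers pass the PTE bits to clear and/or
 * set, and hash__pte_update() takes care of flushing any previously
 * hashed entry. Write-protecting a mapped page might look like:
 */
static inline void hash__example_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	/* Atomically clear _PAGE_WRITE; the old PTE value is returned. */
	hash__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}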

/*
 * Set the dirty and/or accessed bits atomically in a linux PTE; this
 * function doesn't need to flush the hash entry.
 */
static inline void hash__ptep_set_access_flags(pte_t *ptep, pte_t entry)
{
	__be64 old, tmp, val, mask;

	mask = cpu_to_be64(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_READ | _PAGE_WRITE |
			   _PAGE_EXEC | _PAGE_SOFT_DIRTY);

	val = pte_raw(entry) & mask;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%4\n\
		and.	%1,%0,%6\n\
		bne-	1b \n\
		or	%0,%3,%0\n\
		stdcx.	%0,0,%4\n\
		bne-	1b"
	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
	:"r" (val), "r" (ptep), "m" (*ptep), "r" (cpu_to_be64(H_PAGE_BUSY))
	:"cc");
}

static inline int hash__pte_same(pte_t pte_a, pte_t pte_b)
{
	return (((pte_raw(pte_a) ^ pte_raw(pte_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline int hash__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~H_PTE_NONE_MASK) == 0;
}
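
/*
 * Note that both helpers above deliberately ignore the hash tracking
 * bits: PTEs that differ only in _PAGE_HPTEFLAGS compare equal, and an
 * entry whose only set bits fall inside H_PTE_NONE_MASK still counts
 * as none.
 */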

unsigned long pte_get_hash_gslot(unsigned long vpn, unsigned long shift,
		int ssize, real_pte_t rpte, unsigned int subpg_index);

/*
 * This low-level function performs the actual PTE insertion. Setting
 * the PTE depends on the MMU type and other factors; it's a horrible
 * mess that I'm not going to try to clean up now, but I'm keeping it
 * in one place rather than spread around.
 */
static inline void hash__set_pte_at(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, int percpu)
{
	/*
	 * Just store the PTE in the linux page table; the corresponding
	 * hash page table entry is created later, on demand, by the
	 * hash fault path.
	 */
	*ptep = pte;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
				   pmd_t *pmdp, unsigned long old_pmd);
#else
static inline void hpte_do_hugepage_flush(struct mm_struct *mm,
					  unsigned long addr, pmd_t *pmdp,
					  unsigned long old_pmd)
{
	WARN(1, "%s called with THP disabled\n", __func__);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
						  unsigned long page_size,
						  unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
					 unsigned long page_size);

int hash__create_section_mapping(unsigned long start, unsigned long end, int nid);
int hash__remove_section_mapping(unsigned long start, unsigned long end);

#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */