linux/arch/powerpc/include/asm/book3s/64/hash-64k.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#define H_PTE_INDEX_SIZE   8  // size: 8B <<  8 = 2KB, maps 2^8  x 64KB = 16MB
#define H_PMD_INDEX_SIZE  10  // size: 8B << 10 = 8KB, maps 2^10 x 16MB = 16GB
#define H_PUD_INDEX_SIZE  10  // size: 8B << 10 = 8KB, maps 2^10 x 16GB = 16TB
#define H_PGD_INDEX_SIZE   8  // size: 8B <<  8 = 2KB, maps 2^8  x 16TB =  4PB

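/*
 * Worked arithmetic (illustrative): the four index sizes above add up to
 * 8 + 10 + 10 + 8 = 36 index bits; together with the 16 offset bits of a
 * 64KB page that covers 2^52 bytes = 4PB per page-table tree, matching the
 * per-level figures in the comments above.
 */
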
/*
 * If we store section details in page->flags we can't increase
 * MAX_PHYSMEM_BITS: if we increase SECTIONS_WIDTH we will not store node
 * details in page->flags and page_to_nid() has to do a
 * page->section->node lookup.
 * Hence only increase it for VMEMMAP. Further, depending on
 * SPARSEMEM_EXTREME, reduce the memory requirements when there is a large
 * number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
#define H_MAX_PHYSMEM_BITS      51
#else
#define H_MAX_PHYSMEM_BITS      46
#endif

/*
 * Each context is 512TB in size. An SLB miss for the first context/default
 * context is handled in the hot path.
 */
#define MAX_EA_BITS_PER_CONTEXT         49
#define REGION_SHIFT            MAX_EA_BITS_PER_CONTEXT

/*
 * We use one context for each MAP area.
 */
#define H_KERN_MAP_SIZE         (1UL << MAX_EA_BITS_PER_CONTEXT)

/*
 * Define the address range of the kernel non-linear virtual area
 * 2PB
 */
#define H_KERN_VIRT_START       ASM_CONST(0xc008000000000000)

/*
 * 64K-aligned addresses free up a few of the lower bits of the RPN for us.
 * We steal those here. For more details look at pte_pfn/pfn_pte().
 */
#define H_PAGE_COMBO    _RPAGE_RPN0 /* this is a combo 4k page */
#define H_PAGE_4K_PFN   _RPAGE_RPN1 /* PFN is for a single 4k page */
#define H_PAGE_BUSY     _RPAGE_RSV1     /* software: PTE & hash are busy */
#define H_PAGE_HASHPTE  _RPAGE_RPN43    /* PTE has associated HPTE */

/* memory key bits. */
#define H_PTE_PKEY_BIT4         _RPAGE_PKEY_BIT4
#define H_PTE_PKEY_BIT3         _RPAGE_PKEY_BIT3
#define H_PTE_PKEY_BIT2         _RPAGE_PKEY_BIT2
#define H_PTE_PKEY_BIT1         _RPAGE_PKEY_BIT1
#define H_PTE_PKEY_BIT0         _RPAGE_PKEY_BIT0

/*
 * We need to differentiate between an explicit huge page and a THP huge
 * page, since a THP huge page also needs to track real subpage details.
 */
#define H_PAGE_THP_HUGE  H_PAGE_4K_PFN

/* PTE flags to conserve for HPTE identification */
#define _PAGE_HPTEFLAGS (H_PAGE_BUSY | H_PAGE_HASHPTE | H_PAGE_COMBO)
/*
 * We use a 2K PTE page fragment and another 2K for storing the
 * real_pte_t hash index: 8 bytes for each PTE entry and another 8 bytes
 * for storing the slot details.
 */
#define H_PTE_FRAG_SIZE_SHIFT  (H_PTE_INDEX_SIZE + 3 + 1)
#define H_PTE_FRAG_NR   (PAGE_SIZE >> H_PTE_FRAG_SIZE_SHIFT)

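/*
 * Worked example (illustrative): with H_PTE_INDEX_SIZE = 8 the shift is
 * 8 + 3 + 1 = 12, i.e. a 4KB fragment (2KB of PTEs plus 2KB of hash-slot
 * words), and a 64KB page holds PAGE_SIZE >> 12 = 16 PTE fragments.
 */
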
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3 + 1)
#else
#define H_PMD_FRAG_SIZE_SHIFT  (H_PMD_INDEX_SIZE + 3)
#endif
#define H_PMD_FRAG_NR   (PAGE_SIZE >> H_PMD_FRAG_SIZE_SHIFT)

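/*
 * Worked example (illustrative): with H_PMD_INDEX_SIZE = 10 the shift is
 * 14 (16KB fragments, 4 per 64KB page) when the extra slot-tracking half
 * is needed for THP/hugetlb, and 13 (8KB fragments, 8 per page) otherwise.
 */
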
#ifndef __ASSEMBLY__
#include <asm/errno.h>

/*
 * With 64K pages on the hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page information
 * in order to deal with a 64K page made of 4K HW pages. Thus we override
 * the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
{
        real_pte_t rpte;
        unsigned long *hidxp;

        rpte.pte = pte;

        /*
         * Ensure that we do not read the hidx before we read the PTE: the
         * writer side is expected to finish writing the hidx first, followed
         * by the PTE, ordered by smp_wmb(). pte_set_hidx() ensures that.
         */
        smp_rmb();

        hidxp = (unsigned long *)(ptep + offset);
        rpte.hidx = *hidxp;
        return rpte;
}

/*
 * Shift the hidx representation by one-modulo-0xf; i.e. hidx 0 is represented
 * as 1, 1 as 2, ..., and 0xf as 0.  This convention lets us represent the
 * invalid hidx 0xf with a 0x0 bit value. PTEs are anyway zeroed when
 * allocated; we don't have to zero them again and thus save on the
 * initialization.
 */
#define HIDX_UNSHIFT_BY_ONE(x) ((x + 0xfUL) & 0xfUL) /* shift backward by one */
#define HIDX_SHIFT_BY_ONE(x) ((x + 0x1UL) & 0xfUL)   /* shift forward by one */
#define HIDX_BITS(x, index)  (x << (index << 2))
#define BITS_TO_HIDX(x, index)  ((x >> (index << 2)) & 0xfUL)
#define INVALID_RPTE_HIDX  0x0UL

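/*
 * Worked example (illustrative): storing hidx 0x7 for subpage index 2
 * writes HIDX_BITS(HIDX_SHIFT_BY_ONE(0x7), 2) = 0x8 << 8 into rpte.hidx;
 * reading it back, HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, 2)) yields
 * 0x7 again, while a never-written all-zero nibble decodes to the invalid
 * hidx 0xf.
 */
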
static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
        return HIDX_UNSHIFT_BY_ONE(BITS_TO_HIDX(rpte.hidx, index));
}

/*
 * Commit the hidx and return the PTE bits that need to be modified. The caller
 * is expected to modify the PTE bits accordingly and commit the PTE to memory.
 */
static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
                                         unsigned int subpg_index,
                                         unsigned long hidx, int offset)
{
        unsigned long *hidxp = (unsigned long *)(ptep + offset);

        rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
        *hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);

        /*
         * Anyone reading the PTE must ensure the hidx bits are read after
         * reading the PTE, by using the read-side barrier smp_rmb().
         * __real_pte() can be used for that.
         */
        smp_wmb();

        /* No PTE bits to be modified, return 0x0UL */
        return 0x0UL;
}

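/*
 * Pairing note (illustrative): pte_set_hidx() publishes the hidx word and
 * issues smp_wmb() before the caller writes the PTE; __real_pte() reads the
 * PTE first and issues smp_rmb() before loading the hidx word, so a reader
 * that sees the new PTE also sees the matching hidx.
 */
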
#define __rpte_to_pte(r)        ((r).pte)
extern bool __rpte_sub_valid(real_pte_t rpte, unsigned long index);
/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M page
 * as well, since we want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)     \
        do {                                                            \
                unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));  \
                unsigned __split = (psize == MMU_PAGE_4K ||             \
                                    psize == MMU_PAGE_64K_AP);          \
                shift = mmu_psize_defs[psize].shift;                    \
                for (index = 0; vpn < __end; index++,                   \
                             vpn += (1L << (shift - VPN_SHIFT))) {      \
                if (!__split || __rpte_sub_valid(rpte, index))

#define pte_iterate_hashed_end()  } } while(0)

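/*
 * Illustrative usage sketch (hypothetical caller; rpte, psize, vpn and ssize
 * are assumed to have been set up by the hash fault/flush path):
 *
 *      pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *              unsigned long hash = hpt_hash(vpn, shift, ssize);
 *              ... invalidate or re-hash this subpage ...
 *      } pte_iterate_hashed_end();
 *
 * The braces supplied by the caller become the body of the generated loop.
 */
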
#define pte_pagesize_index(mm, addr, pte)       \
        (((pte) & H_PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

extern int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
                           unsigned long pfn, unsigned long size, pgprot_t);
static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long addr,
                                     unsigned long pfn, pgprot_t prot)
{
        if (pfn > (PTE_RPN_MASK >> PAGE_SHIFT)) {
                WARN(1, "remap_4k_pfn called with wrong pfn value\n");
                return -EINVAL;
        }
        return remap_pfn_range(vma, addr, pfn, PAGE_SIZE,
                               __pgprot(pgprot_val(prot) | H_PAGE_4K_PFN));
}

#define H_PTE_TABLE_SIZE        PTE_FRAG_SIZE
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
#define H_PMD_TABLE_SIZE        ((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
                                 (sizeof(unsigned long) << PMD_INDEX_SIZE))
#else
#define H_PMD_TABLE_SIZE        (sizeof(pmd_t) << PMD_INDEX_SIZE)
#endif
#ifdef CONFIG_HUGETLB_PAGE
#define H_PUD_TABLE_SIZE        ((sizeof(pud_t) << PUD_INDEX_SIZE) +    \
                                 (sizeof(unsigned long) << PUD_INDEX_SIZE))
#else
#define H_PUD_TABLE_SIZE        (sizeof(pud_t) << PUD_INDEX_SIZE)
#endif
#define H_PGD_TABLE_SIZE        (sizeof(pgd_t) << PGD_INDEX_SIZE)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline char *get_hpte_slot_array(pmd_t *pmdp)
{
        /*
         * The hpte hidx is stored in the pgtable whose address is in the
         * second half of the PMD.
         *
         * Order this load with the test for pmd_trans_huge in the caller.
         */
        smp_rmb();
        return *(char **)(pmdp + PTRS_PER_PMD);
}
/*
 * The linux hugepage PMD now includes the pmd entries followed by the address
 * of the stashed pgtable_t. The stashed pgtable_t contains the hpte bits:
 * [ 000 | 1 bit secondary | 3 bit hidx | 1 bit valid ]. We use one byte per
 * HPTE entry. With a 16MB hugepage and 64K HPTEs we need 256 entries, and
 * with 4K HPTEs we need 4096 entries. Both will fit in a 4K pgtable_t.
 *
 * The top three bits are intentionally left as zero. This memory location
 * is also used as a normal page PTE pointer. So if we have any pointers
 * left around while we collapse a hugepage, we need to make sure the
 * _PAGE_PRESENT bit of that is zero when we look at them.
 */
static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index)
{
        return hpte_slot_array[index] & 0x1;
}

static inline unsigned int hpte_hash_index(unsigned char *hpte_slot_array,
                                           int index)
{
        return hpte_slot_array[index] >> 1;
}

static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
                                        unsigned int index, unsigned int hidx)
{
        hpte_slot_array[index] = (hidx << 1) | 0x1;
}

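/*
 * Worked example (illustrative): mark_hpte_slot_valid(arr, 5, 0x6) stores
 * (0x6 << 1) | 0x1 = 0x0d in arr[5]; hpte_valid(arr, 5) then returns 1 and
 * hpte_hash_index(arr, 5) returns 0x6. A still-zero byte reads back as
 * invalid.
 */
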
/*
 * For core kernel code, by design, pmd_trans_huge() is never run on any
 * hugetlbfs page. The hugetlbfs page table walking and mangling paths are
 * totally separated from the core VM paths and they're differentiated by
 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge() could run.
 *
 * pmd_trans_huge() is defined as false at build time if
 * CONFIG_TRANSPARENT_HUGEPAGE=n, to optimize away code blocks at build
 * time in that case.
 *
 * For ppc64 we need to differentiate explicit hugepages from THP, because
 * for THP we also track the subpage details at the pmd level. We don't do
 * that for explicit huge pages.
 */
static inline int hash__pmd_trans_huge(pmd_t pmd)
{
        return !!((pmd_val(pmd) & (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP)) ==
                  (_PAGE_PTE | H_PAGE_THP_HUGE));
}

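/*
 * Note (illustrative): because the comparison above requires _PAGE_DEVMAP
 * to be clear, a PMD created with hash__pmd_mkdevmap() below does not
 * satisfy this test.
 */
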
static inline int hash__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
        return (((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) & ~cpu_to_be64(_PAGE_HPTEFLAGS)) == 0);
}

static inline pmd_t hash__pmd_mkhuge(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE));
}

extern unsigned long hash__pmd_hugepage_update(struct mm_struct *mm,
                                           unsigned long addr, pmd_t *pmdp,
                                           unsigned long clr, unsigned long set);
extern pmd_t hash__pmdp_collapse_flush(struct vm_area_struct *vma,
                                   unsigned long address, pmd_t *pmdp);
extern void hash__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                         pgtable_t pgtable);
extern pgtable_t hash__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t hash__pmdp_huge_get_and_clear(struct mm_struct *mm,
                                       unsigned long addr, pmd_t *pmdp);
extern int hash__has_transparent_hugepage(void);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pmd_t hash__pmd_mkdevmap(pmd_t pmd)
{
        return __pmd(pmd_val(pmd) | (_PAGE_PTE | H_PAGE_THP_HUGE | _PAGE_DEVMAP));
}

#endif  /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */