/* linux/arch/powerpc/include/asm/book3s/64/mmu.h */
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
   3#define _ASM_POWERPC_BOOK3S_64_MMU_H_
   4
   5#include <asm/page.h>
   6
   7#ifndef __ASSEMBLY__
   8/*
   9 * Page size definition
  10 *
  11 *    shift : is the "PAGE_SHIFT" value for that page size
  12 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
  13 *            directly to a slbmte "vsid" value
  14 *    penc  : is the HPTE encoding mask for the "LP" field:
  15 *
  16 */
/*
 * Per-page-size MMU characteristics; one entry per MMU_PAGE_* index.
 * The backing array mmu_psize_defs[] is defined elsewhere (presumably
 * populated at boot — confirm against the setup code).
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
  28#endif /* __ASSEMBLY__ */
  29
/*
 * If we store section details in page->flags we can't increase
 * MAX_PHYSMEM_BITS: if we increase SECTIONS_WIDTH we will not store node
 * details in page->flags and page_to_nid does a page->section->node lookup
 * instead.  Hence only increase the limit for VMEMMAP.  Further, depending
 * on SPARSEMEM_EXTREME, this reduces memory requirements with a large
 * number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
	defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
#else
#define MAX_PHYSMEM_BITS 46
#endif
  44
  45/* 64-bit classic hash table MMU */
  46#include <asm/book3s/64/mmu-hash.h>
  47
#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format.
 * Both tables are arrays of 128-bit (two big-endian 64-bit word) entries.
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;	/* base of the process table */

struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;	/* base of the partition table */
  63
/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)	/* host uses radix */
#define RPDB_MASK	0x0fffffffffffff00UL	/* radix page dir. base */
/*
 * NOTE(review): RPDB_SHIFT expands to 256, not a shift count as the name
 * suggests — confirm intended use before shifting by it.
 */
#define RPDB_SHIFT	(1UL << 8)
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL	/* process table base */

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

/* Process table geometry is derived from the PID width */
#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

/*
 * Power9 currently only supports a 64K partition table size.
 */
#define PATB_SIZE_SHIFT	16
  92
/* Context (VSID-space / PID) identifier type */
typedef unsigned long mm_context_id_t;
struct spinlock;	/* forward declaration only; avoids header dependency */

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8
  98
/* Book3S-64 per-mm MMU context */
typedef struct {
	union {
		/*
		 * We use id as the PIDR content for radix. On hash we can use
		 * more than one id. The extended ids are used when we start
		 * having address above 512TB. We allocate one extended id
		 * for each 512TB. The new id is then used with the 49 bit
		 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
		 * from EA and new context ids to build the new VAs.
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};

	/* Number of bits in the mm_cpumask */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* Hash-MMU specific state (accessors below read fields from it) */
	struct hash_mm_context *hash_context;

	/* Base user address of the VDSO — TODO confirm against mapping code */
	unsigned long vdso_base;
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;
 141
 142static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
 143{
 144        return ctx->hash_context->user_psize;
 145}
 146
 147static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
 148{
 149        ctx->hash_context->user_psize = user_psize;
 150}
 151
 152static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
 153{
 154        return ctx->hash_context->low_slices_psize;
 155}
 156
 157static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
 158{
 159        return ctx->hash_context->high_slices_psize;
 160}
 161
 162static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
 163{
 164        return ctx->hash_context->slb_addr_limit;
 165}
 166
 167static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
 168{
 169        ctx->hash_context->slb_addr_limit = limit;
 170}
 171
/*
 * Return the slice mask in the hash context that tracks which address-space
 * slices use the given page size.  Only the page sizes compiled in are
 * handled; anything other than those (and the 4K fallback) is a programming
 * error and triggers BUG_ON.
 */
static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &ctx->hash_context->mask_64k;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	/* hugepage sizes have their own masks when hugetlb is enabled */
	if (psize == MMU_PAGE_16M)
		return &ctx->hash_context->mask_16m;
	if (psize == MMU_PAGE_16G)
		return &ctx->hash_context->mask_16g;
#endif
	BUG_ON(psize != MMU_PAGE_4K);

	return &ctx->hash_context->mask_4k;
}
 188
 189#ifdef CONFIG_PPC_SUBPAGE_PROT
 190static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
 191{
 192        return ctx->hash_context->spt;
 193}
 194#endif
 195
/*
 * The current system page and segment sizes, as MMU_PAGE_* indices.
 * The suffix names the region each applies to (linear map, user/virtual,
 * vmalloc, vmemmap, ioremap) — definitions live elsewhere.
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
 204
/* MMU initialization — hash and radix flavours declared side by side */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
 212static inline void early_init_mmu(void)
 213{
 214        if (radix_enabled())
 215                return radix__early_init_mmu();
 216        return hash__early_init_mmu();
 217}
/* Secondary-CPU variants of the early MMU setup hooks */
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
 220static inline void early_init_mmu_secondary(void)
 221{
 222        if (radix_enabled())
 223                return radix__early_init_mmu_secondary();
 224        return hash__early_init_mmu_secondary();
 225}
 226
/* Per-MMU-mode hooks invoked by setup_initial_memory_limit() below */
extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					 phys_addr_t first_memblock_size);
extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					 phys_addr_t first_memblock_size);
 231static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 232                                              phys_addr_t first_memblock_size)
 233{
 234        if (early_radix_enabled())
 235                return radix__setup_initial_memory_limit(first_memblock_base,
 236                                                   first_memblock_size);
 237        return hash__setup_initial_memory_limit(first_memblock_base,
 238                                           first_memblock_size);
 239}
 240
/*
 * Hook for installing the process table; presumably assigned to the native
 * or hypervisor registration routine elsewhere — not visible in this header.
 */
extern int (*register_process_table)(unsigned long base, unsigned long page_size,
				     unsigned long tbl_size);
 243
 244#ifdef CONFIG_PPC_PSERIES
 245extern void radix_init_pseries(void);
 246#else
 247static inline void radix_init_pseries(void) { };
 248#endif
 249
 250static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
 251{
 252        int index = ea >> MAX_EA_BITS_PER_CONTEXT;
 253
 254        if (likely(index < ARRAY_SIZE(ctx->extended_id)))
 255                return ctx->extended_id[index];
 256
 257        /* should never happen */
 258        WARN_ON(1);
 259        return 0;
 260}
 261
 262static inline unsigned long get_user_vsid(mm_context_t *ctx,
 263                                          unsigned long ea, int ssize)
 264{
 265        unsigned long context = get_user_context(ctx, ea);
 266
 267        return get_vsid(context, ea, ssize);
 268}
 269
 270#endif /* __ASSEMBLY__ */
 271#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
 272