linux/arch/powerpc/include/asm/book3s/64/mmu.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_

#include <asm/page.h>

#ifndef __ASSEMBLY__
/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
        unsigned int    shift;  /* number of bits */
        int             penc[MMU_PAGE_COUNT];   /* HPTE encoding */
        unsigned int    tlbiel; /* tlbiel supported for that page size */
        unsigned long   avpnm;  /* bits to mask out in AVPN in the HPTE */
        union {
                unsigned long   sllp;   /* SLB L||LP (exact mask to use in slbmte) */
                unsigned long ap;       /* Ap encoding used by PowerISA 3.0 */
        };
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
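
/*
 * Illustrative sketch (not upstream code): how a caller might consult
 * mmu_psize_defs[]. For example, the hash fault path needs the "LP"
 * encoding of an actual page size within a base page size; the
 * MMU_PAGE_* indices come from <asm/mmu.h>, and the table is assumed to
 * have been populated by early MMU init.
 */
static inline int example_hpte_actual_penc(int base_psize, int actual_psize)
{
        /* -1 means this base/actual combination is not supported */
        return mmu_psize_defs[base_psize].penc[actual_psize];
}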
#endif /* __ASSEMBLY__ */

/*
 * If we store section details in page->flags, we can't increase
 * MAX_PHYSMEM_BITS: increasing SECTIONS_WIDTH would leave no room for the
 * node details in page->flags, and page_to_nid() would then need a
 * page->section->node lookup.
 * Hence only increase it for VMEMMAP. Further, depending on
 * SPARSEMEM_EXTREME, reduce the memory requirements when there is a large
 * number of sections.
 * 51 bits is the max physical real address on POWER9.
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) &&  \
        defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
#else
#define MAX_PHYSMEM_BITS 46
#endif
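
/*
 * Worked example (illustrative): MAX_PHYSMEM_BITS = 51 covers up to
 * 1UL << 51 bytes = 2 PB of real memory; the 46-bit fallback covers 64 TB.
 */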

/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>

#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format
 */
struct prtb_entry {
        __be64 prtb0;
        __be64 prtb1;
};
extern struct prtb_entry *process_tb;

struct patb_entry {
        __be64 patb0;
        __be64 patb1;
};
extern struct patb_entry *partition_tb;

/* Bits in patb0 field */
#define PATB_HR         (1UL << 63)
#define RPDB_MASK       0x0fffffffffffff00UL
#define RPDB_SHIFT      8
#define RTS1_SHIFT      61              /* top 2 bits of radix tree size */
#define RTS1_MASK       (3UL << RTS1_SHIFT)
#define RTS2_SHIFT      5               /* bottom 3 bits of radix tree size */
#define RTS2_MASK       (7UL << RTS2_SHIFT)
#define RPDS_MASK       0x1f            /* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR         (1UL << 63)     /* guest uses radix; must match HR */
#define PRTS_MASK       0x1f            /* process table size field */
#define PRTB_MASK       0x0ffffffffffff000UL
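
/*
 * Illustrative sketch (hypothetical helper, mirroring what the radix init
 * code computes): encode the radix tree size for a 52-bit virtual address
 * space into patb0. The 5-bit size value 52 - 31 = 21 (0b10101) is split
 * across RTS1 (top two bits) and RTS2 (bottom three bits).
 */
static inline unsigned long example_patb0_rts_52bit(void)
{
        return (0x2UL << RTS1_SHIFT) | (0x5UL << RTS2_SHIFT);
}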

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES    (1ul << mmu_pid_bits)
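
/*
 * Worked example (illustrative): struct prtb_entry is two __be64s, i.e.
 * 16 = 1 << 4 bytes, hence the "+ 4" above. With mmu_pid_bits = 20 the
 * process table has 1 << 20 entries and PRTB_SIZE_SHIFT is 24 (16 MB).
 */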

/*
 * Power9 currently only supports a 64K partition table size.
 */
#define PATB_SIZE_SHIFT 16
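
/*
 * Worked example (illustrative): a 1 << 16 = 64K byte partition table
 * with 16-byte patb_entry slots holds 4096 partitions (LPIDs).
 */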

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

typedef struct {
        union {
                /*
                 * We use id as the PIDR content for radix. On hash we can use
                 * more than one id. The extended ids are used when we start
                 * having addresses above 512TB. We allocate one extended id
                 * for each 512TB. The new id is then used with the 49 bit
                 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
                 * from the EA and the new context ids to build the new VAs.
                 */
                mm_context_id_t id;
                mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
        };

        /* Number of bits set in the mm_cpumask */
        atomic_t active_cpus;

        /* Number of users of the external (Nest) MMU */
        atomic_t copros;

        /* NPU NMMU context */
        struct npu_context *npu_context;
        struct hash_mm_context *hash_context;

        unsigned long vdso_base;
        /*
         * pagetable fragment support
         */
        void *pte_frag;
        void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
        struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
        /*
         * Each bit represents one protection key.
         * bit set   -> key allocated
         * bit unset -> key available for allocation
         */
        u32 pkey_allocation_map;
        s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;
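
/*
 * Illustrative sketch (hypothetical helper): under CONFIG_PPC_MEM_KEYS,
 * testing whether a protection key is currently allocated amounts to
 * checking its bit in pkey_allocation_map above.
 */
#ifdef CONFIG_PPC_MEM_KEYS
static inline bool example_pkey_is_allocated(mm_context_t *ctx, int pkey)
{
        return ctx->pkey_allocation_map & (1U << pkey);
}
#endif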

static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
        return ctx->hash_context->user_psize;
}

static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
{
        ctx->hash_context->user_psize = user_psize;
}

static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
{
        return ctx->hash_context->low_slices_psize;
}

static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
{
        return ctx->hash_context->high_slices_psize;
}

static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
{
        return ctx->hash_context->slb_addr_limit;
}

static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
{
        ctx->hash_context->slb_addr_limit = limit;
}

static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
        if (psize == MMU_PAGE_64K)
                return &ctx->hash_context->mask_64k;
#endif
#ifdef CONFIG_HUGETLB_PAGE
        if (psize == MMU_PAGE_16M)
                return &ctx->hash_context->mask_16m;
        if (psize == MMU_PAGE_16G)
                return &ctx->hash_context->mask_16g;
#endif
        BUG_ON(psize != MMU_PAGE_4K);

        return &ctx->hash_context->mask_4k;
}

#ifdef CONFIG_PPC_SUBPAGE_PROT
static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
{
        return ctx->hash_context->spt;
}
#endif

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;

/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
{
        if (radix_enabled())
                return radix__early_init_mmu();
        return hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
static inline void early_init_mmu_secondary(void)
{
        if (radix_enabled())
                return radix__early_init_mmu_secondary();
        return hash__early_init_mmu_secondary();
}

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                         phys_addr_t first_memblock_size);
extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                         phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                              phys_addr_t first_memblock_size)
{
        if (early_radix_enabled())
                return radix__setup_initial_memory_limit(first_memblock_base,
                                                   first_memblock_size);
        return hash__setup_initial_memory_limit(first_memblock_base,
                                           first_memblock_size);
}

extern int (*register_process_table)(unsigned long base, unsigned long page_size,
                                     unsigned long tbl_size);

#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif

static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
        int index = ea >> MAX_EA_BITS_PER_CONTEXT;

        if (likely(index < ARRAY_SIZE(ctx->extended_id)))
                return ctx->extended_id[index];

        /* should never happen */
        WARN_ON(1);
        return 0;
}
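
/*
 * Worked example (illustrative): with MAX_EA_BITS_PER_CONTEXT = 49 (one
 * context per 512TB, per the mm_context_t comment above), ea = 1UL << 49
 * gives index 1 and selects the second extended id; any ea below 512TB
 * selects extended_id[0], which aliases ->id through the union.
 */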

static inline unsigned long get_user_vsid(mm_context_t *ctx,
                                          unsigned long ea, int ssize)
{
        unsigned long context = get_user_context(ctx, ea);

        return get_vsid(context, ea, ssize);
}

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */