linux/arch/powerpc/include/asm/book3s/64/mmu.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_H_

#ifndef __ASSEMBLY__
/*
 * Page size definition
 *
 *    shift : is the "PAGE_SHIFT" value for that page size
 *    sllp  : is a bit mask with the value of SLB L || LP to be or'ed
 *            directly to a slbmte "vsid" value
 *    penc  : is the HPTE encoding mask for the "LP" field
 */
struct mmu_psize_def {
	unsigned int	shift;	/* number of bits */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;	/* tlbiel supported for that page size */
	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long	ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
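
/*
 * Illustrative sketch (not part of the original header): how the fields
 * above are typically consulted.  The byte size of a page size index
 * comes from ->shift, and ->penc[] holds the HPTE "LP" encoding for a
 * base/actual page size pair, with -1 marking unsupported pairs.  The
 * example_* helpers are hypothetical, not kernel API.
 */
static inline unsigned long example_page_bytes(int psize)
{
	return 1UL << mmu_psize_defs[psize].shift;
}

static inline bool example_penc_valid(int base_psize, int actual_psize)
{
	return mmu_psize_defs[base_psize].penc[actual_psize] != -1;
}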

#endif /* __ASSEMBLY__ */

/* 64-bit classic hash table MMU */
#include <asm/book3s/64/mmu-hash.h>

#ifndef __ASSEMBLY__
/*
 * ISA 3.0 partition and process table entry format
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;

struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;

/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)
#define RPDB_MASK	0x0fffffffffffff00UL
#define RPDB_SHIFT	8		/* radix page dir. base starts at bit 8 */
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL
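
/*
 * Illustrative sketch (not part of the original header): composing the
 * radix tree size field of patb0 from a 5-bit RTS value, which the
 * architecture splits across RTS1 (top 2 bits) and RTS2 (bottom 3 bits)
 * as defined above.  example_patb0_rts() is hypothetical, not kernel API.
 */
static inline unsigned long example_patb0_rts(unsigned long rts)
{
	return ((rts >> 3) << RTS1_SHIFT) | ((rts & 7) << RTS2_SHIFT);
}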

/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from */
extern unsigned int mmu_base_pid;

/*
 * memory block size used with radix translation.
 */
extern unsigned long __ro_after_init radix_mem_block_size;

#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)
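
/*
 * Sizing note (illustrative, not from the original header): each
 * prtb_entry is two __be64 words, i.e. 16 bytes = 2^4, which is why
 * PRTB_SIZE_SHIFT adds 4 to mmu_pid_bits.  With mmu_pid_bits == 20,
 * for example, that is 2^20 entries in a 2^24-byte (16MB) process table.
 */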

/*
 * Power9 currently only supports a 64K partition table size.
 */
#define PATB_SIZE_SHIFT	16

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4G range. That gets us 16 low slices. For the rest we track
 * slices in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
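
/*
 * Illustrative sketch (not part of the original header): mapping an
 * effective address to its slice index under the split described above.
 * The shifts mirror the kernel's SLICE_LOW_SHIFT (28, 256MB slices) and
 * SLICE_HIGH_SHIFT (40, 1TB slices), hard-coded here for the example;
 * example_ea_to_slice() is hypothetical, not kernel API.
 */
static inline unsigned int example_ea_to_slice(unsigned long ea)
{
	if (ea < (1UL << 32))	/* low range: 16 x 256MB slices */
		return ea >> 28;
	return ea >> 40;	/* high range: one slice per 1TB */
}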

typedef struct {
	union {
		/*
		 * We use id as the PIDR content for radix. On hash we can use
		 * more than one id. The extended ids are used when we start
		 * having addresses above 512TB. We allocate one extended id
		 * for each 512TB. The new id is then used with the 49-bit
		 * EA to build a new VA. We always use ESID_BITS_1T_MASK bits
		 * from the EA and the new context ids to build the new VAs.
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};
	u16 user_psize;		/* page size index */

	/* Number of bits set in the mm_cpumask */
	atomic_t active_cpus;

	/* Number of users of the external (Nest) MMU */
	atomic_t copros;

	/* NPU NMMU context */
	struct npu_context *npu_context;

#ifdef CONFIG_PPC_MM_SLICES
	/* SLB page size encodings */
	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
# ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
# endif
	struct slice_mask mask_4k;
# ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
# endif
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key.
	 * bit set   -> key allocated
	 * bit unset -> key available for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;
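
/*
 * Illustrative sketch (not part of the original header): the
 * one-bit-per-key convention of pkey_allocation_map documented above.
 * example_pkey_allocated() is hypothetical, not kernel API.
 */
#ifdef CONFIG_PPC_MEM_KEYS
static inline bool example_pkey_allocated(const mm_context_t *ctx, int pkey)
{
	return (ctx->pkey_allocation_map >> pkey) & 1;
}
#endif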

/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;

/* MMU initialization */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
static inline void early_init_mmu(void)
{
	if (radix_enabled())
		return radix__early_init_mmu();
	return hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled())
		return radix__early_init_mmu_secondary();
	return hash__early_init_mmu_secondary();
}

extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					     phys_addr_t first_memblock_size);
static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
{
	/*
	 * Hash has stricter restrictions. At this point we don't
	 * know which translations we will pick. Hence go with hash
	 * restrictions.
	 */
	return hash__setup_initial_memory_limit(first_memblock_base,
						first_memblock_size);
}

#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
			atomic_dec(&(mm)->context.active_cpus);		\
			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
		}							\
	} while (0)

void cleanup_cpu_mmu_context(void);
#endif

static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
{
	int index = ea >> MAX_EA_BITS_PER_CONTEXT;

	if (likely(index < ARRAY_SIZE(ctx->extended_id)))
		return ctx->extended_id[index];

	/* should never happen */
	WARN_ON(1);
	return 0;
}

static inline unsigned long get_user_vsid(mm_context_t *ctx,
					  unsigned long ea, int ssize)
{
	unsigned long context = get_ea_context(ctx, ea);

	return get_vsid(context, ea, ssize);
}
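
/*
 * Usage note (illustrative, not from the original header): on hash,
 * each context covers one 512TB region, so get_ea_context() indexes
 * extended_id[] by ea >> MAX_EA_BITS_PER_CONTEXT (49, per the
 * mm_context_t comment above), and get_user_vsid() folds the resulting
 * context with the EA's ESID bits into the final VSID.
 */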

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */