/*
 * PowerPC MMU support: MMU feature bit definitions, runtime/early feature
 * test helpers, and the fixed page-size index constants shared between C
 * and assembly.
 */
#ifndef _ASM_POWERPC_MMU_H_
#define _ASM_POWERPC_MMU_H_
#ifdef __KERNEL__

#include <linux/types.h>

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/*
 * MMU features bit definitions
 */

/*
 * MMU families
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)

/*
 * Individual features below.
 */

/*
 * Support for 68 bit VA space. We added that from ISA 2.05
 */
#define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)

/*
 * Kernel read only support.
 * We added the ppp value 0b110 in ISA 2.04.
 */
#define MMU_FTR_KERNEL_RO		ASM_CONST(0x00004000)

/*
 * We need to clear top 16bits of va (from the remaining 64 bits) in
 * tlbie* instructions
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on 32-bit processor, only used
 * by CONFIG_6xx currently as BookE supports that from day 1
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state. This
 * is specific to an errata on e300c2/c3/c4 class parts
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation. Processor should support tlbsrx.
 * instruction and MAS0[WQ].
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)

/* MMU feature bit sets for various CPUs */
/* NOTE(review): MMU_FTR_PPCAS_ARCH_V2 is not defined in this file; it is
 * presumably provided by one of the included headers (e.g. cputable.h) —
 * TODO confirm.
 */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER4		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
#define MMU_FTRS_POWER7		MMU_FTRS_POWER6
#define MMU_FTRS_POWER8		MMU_FTRS_POWER6
#define MMU_FTRS_POWER9		MMU_FTRS_POWER6
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_FSL_BOOK3E
#include <asm/percpu.h>
DECLARE_PER_CPU(int, next_tlbcam_idx);
#endif

/* Union of every MMU feature bit that can possibly be set on this build;
 * used to let the compiler discard feature tests that can never be true.
 */
enum {
	MMU_FTRS_POSSIBLE = MMU_FTR_HPTE_TABLE | MMU_FTR_TYPE_8xx |
		MMU_FTR_TYPE_40x | MMU_FTR_TYPE_44x | MMU_FTR_TYPE_FSL_E |
		MMU_FTR_TYPE_47x | MMU_FTR_USE_HIGH_BATS | MMU_FTR_BIG_PHYS |
		MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_USE_TLBILX |
		MMU_FTR_LOCK_BCAST_INVAL | MMU_FTR_NEED_DTLB_SW_LRU |
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
#endif
		0,
};

/* Feature test usable before jump labels are initialized: reads the CPU
 * spec directly, masked by what this build can possibly support.
 */
static inline bool early_mmu_has_feature(unsigned long feature)
{
	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
}

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
#include <linux/jump_label.h>

#define NUM_MMU_FTR_KEYS	32

extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];

extern void mmu_feature_keys_init(void);

/* Jump-label based feature test. 'feature' must be a compile-time
 * constant single bit; its bit index selects the static key.
 */
static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

#ifndef __clang__ /* clang can't cope with this */
	BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	if (!static_key_initialized) {
		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_mmu_has_feature(feature);
	}
#endif

	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	int i;

	i = __builtin_ctzl(feature);
	cur_cpu_spec->mmu_features &= ~feature;
	static_branch_disable(&mmu_feature_keys[i]);
}
#else

static inline void mmu_feature_keys_init(void)
{

}

static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}

static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
#endif /* CONFIG_JUMP_LABEL */

extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;

#ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server, on embedded, we
 * make it match the size of our bolted TLB area
 */
extern u64 ppc64_rma_size;

/* Cleanup function used by kexec */
extern void mmu_cleanup_all(void);
extern void radix__mmu_cleanup_all(void);

/* Functions for creating and updating partition table on POWER9 */
extern void mmu_partition_table_init(void);
extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
					  unsigned long dw1);
#endif /* CONFIG_PPC64 */

struct mm_struct;
#ifdef CONFIG_DEBUG_VM
extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
#else /* CONFIG_DEBUG_VM */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
#endif /* !CONFIG_DEBUG_VM */

#ifdef CONFIG_PPC_RADIX_MMU
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}

static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
#else
static inline bool radix_enabled(void)
{
	return false;
}

static inline bool early_radix_enabled(void)
{
	return false;
}
#endif

#endif /* !__ASSEMBLY__ */

/* The kernel uses the constants below to index in the page sizes array.
 * The use of fixed constants for this purpose is better for performances
 * of the low level hash refill handlers.
 *
 * A non supported page size has a "shift" field set to 0
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different,
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_512K	5
#define MMU_PAGE_1M	6
#define MMU_PAGE_2M	7
#define MMU_PAGE_4M	8
#define MMU_PAGE_8M	9
#define MMU_PAGE_16M	10
#define MMU_PAGE_64M	11
#define MMU_PAGE_256M	12
#define MMU_PAGE_1G	13
#define MMU_PAGE_16G	14
#define MMU_PAGE_64G	15

/*
 * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
 * Also we need to change the type of mm_context.low/high_slices_psize.
 */
#define MMU_PAGE_COUNT	16

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/book3s/64/mmu.h>
#else /* CONFIG_PPC_BOOK3S_64 */

#ifndef __ASSEMBLY__
/* MMU initialization */
extern void early_init_mmu(void);
extern void early_init_mmu_secondary(void);
extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size);
static inline void mmu_early_init_devtree(void) { }
#endif /* __ASSEMBLY__ */
#endif

#if defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */
#include <asm/book3s/32/mmu-hash.h>
#elif defined(CONFIG_40x)
/* 40x-style software loaded TLB */
#  include <asm/mmu-40x.h>
#elif defined(CONFIG_44x)
/* 44x-style software loaded TLB */
#  include <asm/mmu-44x.h>
#elif defined(CONFIG_PPC_BOOK3E_MMU)
/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
#  include <asm/mmu-book3e.h>
#elif defined (CONFIG_PPC_8xx)
/* Motorola/Freescale 8xx software loaded TLB */
#  include <asm/mmu-8xx.h>
#endif

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MMU_H_ */