1
2#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
3#define _ASM_POWERPC_BOOK3S_64_MMU_H_
4
5#include <asm/page.h>
6
7#ifndef __ASSEMBLY__
8
9
10
11
12
13
14
15
16
/*
 * Per-page-size MMU parameters; one entry per MMU_PAGE_* index in
 * mmu_psize_defs[].
 */
struct mmu_psize_def {
	unsigned int	shift;			/* page size as a power of 2 */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding */
	unsigned int	tlbiel;			/* tlbiel supported for that page size */
	unsigned long	avpnm;			/* bits to mask out in AVPN in the HPTE */
	union {
		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
		unsigned long	ap;	/* Ap encoding used by PowerISA 3.0 */
	};
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
28#endif
29
30
31
32
33
34
35
36
37
/*
 * Maximum supported physical address width. The larger 2^51 limit is
 * only usable with SPARSEMEM_VMEMMAP + SPARSEMEM_EXTREME and 64K pages;
 * every other configuration is limited to 2^46 (64TB).
 */
#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME) && \
	defined(CONFIG_PPC_64K_PAGES)
#define MAX_PHYSMEM_BITS 51
#else
#define MAX_PHYSMEM_BITS 46
#endif
44
45
46#include <asm/book3s/64/mmu-hash.h>
47
48#ifndef __ASSEMBLY__
49
50
51
/*
 * Process table entry: two doublewords, stored big-endian as the
 * hardware reads them. process_tb points at the table's base.
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
extern struct prtb_entry *process_tb;

/*
 * Partition table entry, same big-endian two-doubleword layout.
 */
struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
extern struct patb_entry *partition_tb;
63
/* Bits in patb0 field */
#define PATB_HR		(1UL << 63)	/* host uses radix */
#define RPDB_MASK	0x0fffffffffffff00UL	/* radix page dir. base */
#define RPDB_SHIFT	(1UL << 8)
#define RTS1_SHIFT	61		/* top 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5		/* bottom 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f		/* root page dir. size field */

/* Bits in patb1 field */
#define PATB_GR		(1UL << 63)	/* guest uses radix; must match HR */
#define PRTS_MASK	0x1f		/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL	/* process table base */
78
79
/* Width of a process ID in bits; sizes the process table below. */
extern unsigned int mmu_pid_bits;

/* Base PID to allocate from. */
extern unsigned int mmu_base_pid;

/* Process table geometry derived from the PID width. */
#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

/*
 * Partition table is a fixed 64K (2^16 bytes).
 */
#define PATB_SIZE_SHIFT	16

typedef unsigned long mm_context_id_t;
struct spinlock;

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8
98
typedef struct {
	union {
		/*
		 * On radix, id is used directly as the PIDR content.
		 * Hash can use more than one context id per task, one
		 * per TASK_CONTEXT_SIZE chunk of the 64-bit address
		 * space; these live in extended_id[], whose entry 0
		 * aliases id through this union (see get_user_context()).
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};

	/* Number of CPUs this context is active on — presumably used to
	 * scope TLB invalidations; NOTE(review): confirm against users. */
	atomic_t active_cpus;

	/* Number of coprocessor (Nest MMU) users of this context —
	 * NOTE(review): confirm against mm_context_add_copro(). */
	atomic_t copros;

	/* NPU NMMU context */
	struct npu_context *npu_context;
	/* Hash-MMU-specific state: page sizes, slice masks, SLB address
	 * limit, subpage protection (see the mm_ctx_* accessors below). */
	struct hash_mm_context *hash_context;

	/* Base address of the VDSO mapping for this mm */
	unsigned long vdso_base;

	/*
	 * pagetable fragment support
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	/* Memory regions registered for TCE/IOMMU use */
	struct list_head iommu_group_mem_list;
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key:
	 * bit set   -> key allocated
	 * bit unset -> key free for allocation
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey; /* key holding execute-only protection */
#endif
} mm_context_t;
143
144static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
145{
146 return ctx->hash_context->user_psize;
147}
148
149static inline void mm_ctx_set_user_psize(mm_context_t *ctx, u16 user_psize)
150{
151 ctx->hash_context->user_psize = user_psize;
152}
153
154static inline unsigned char *mm_ctx_low_slices(mm_context_t *ctx)
155{
156 return ctx->hash_context->low_slices_psize;
157}
158
159static inline unsigned char *mm_ctx_high_slices(mm_context_t *ctx)
160{
161 return ctx->hash_context->high_slices_psize;
162}
163
164static inline unsigned long mm_ctx_slb_addr_limit(mm_context_t *ctx)
165{
166 return ctx->hash_context->slb_addr_limit;
167}
168
169static inline void mm_ctx_set_slb_addr_limit(mm_context_t *ctx, unsigned long limit)
170{
171 ctx->hash_context->slb_addr_limit = limit;
172}
173
/*
 * Return the slice mask tracking which address-space slices use @psize.
 * Only page sizes the kernel is configured for have a dedicated mask;
 * any other requested size must be the base 4K size (BUG otherwise).
 */
static inline struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
{
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_64K)
		return &ctx->hash_context->mask_64k;
#endif
#ifdef CONFIG_HUGETLB_PAGE
	if (psize == MMU_PAGE_16M)
		return &ctx->hash_context->mask_16m;
	if (psize == MMU_PAGE_16G)
		return &ctx->hash_context->mask_16g;
#endif
	BUG_ON(psize != MMU_PAGE_4K);

	return &ctx->hash_context->mask_4k;
}
190
191#ifdef CONFIG_PPC_SUBPAGE_PROT
192static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
193{
194 return ctx->hash_context->spt;
195}
196#endif
197
198
199
200
/*
 * The current system page and segment sizes
 */
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
extern int mmu_vmemmap_psize;
extern int mmu_io_psize;

/* MMU initialization: device-tree scan and early setup, with hash/radix
 * variants selected by the early_init_mmu*() wrappers below. */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void radix_init_native(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);
/* Boot-CPU MMU setup: dispatch to the radix or hash implementation. */
static inline void early_init_mmu(void)
{
	if (radix_enabled()) {
		radix__early_init_mmu();
		return;
	}
	hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);
/* Secondary-CPU MMU setup: dispatch to the radix or hash implementation. */
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled()) {
		radix__early_init_mmu_secondary();
		return;
	}
	hash__early_init_mmu_secondary();
}
228
229extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
230 phys_addr_t first_memblock_size);
231extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
232 phys_addr_t first_memblock_size);
233static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
234 phys_addr_t first_memblock_size)
235{
236 if (early_radix_enabled())
237 return radix__setup_initial_memory_limit(first_memblock_base,
238 first_memblock_size);
239 return hash__setup_initial_memory_limit(first_memblock_base,
240 first_memblock_size);
241}
242
/* Hook through which the platform registers the process table
 * (base address, page size, table size) with the hardware/hypervisor. */
extern int (*register_process_table)(unsigned long base, unsigned long page_size,
				     unsigned long tbl_size);
245
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
/*
 * Without pseries support, radix hypervisor registration is a no-op.
 * Fix: drop the stray trailing semicolon after the body — "};" leaves
 * an empty file-scope declaration, which ISO C (pre-C23) rejects.
 */
static inline void radix_init_pseries(void) { }
#endif
251
252static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
253{
254 int index = ea >> MAX_EA_BITS_PER_CONTEXT;
255
256 if (likely(index < ARRAY_SIZE(ctx->extended_id)))
257 return ctx->extended_id[index];
258
259
260 WARN_ON(1);
261 return 0;
262}
263
264static inline unsigned long get_user_vsid(mm_context_t *ctx,
265 unsigned long ea, int ssize)
266{
267 unsigned long context = get_user_context(ctx, ea);
268
269 return get_vsid(context, ea, ssize);
270}
271
272#endif
273#endif
274