1
2#ifndef _ASM_POWERPC_BOOK3S_64_MMU_H_
3#define _ASM_POWERPC_BOOK3S_64_MMU_H_
4
5#ifndef __ASSEMBLY__
6
7
8
9
10
11
12
13
14
/*
 * Per-page-size MMU parameters, one entry per MMU_PAGE_* index
 * (see mmu_psize_defs[] below).
 */
struct mmu_psize_def {
	unsigned int	shift;			/* page size as a power of 2 (number of bits) */
	int		penc[MMU_PAGE_COUNT];	/* HPTE encoding per actual page size -- hash MMU; TODO confirm -1 means invalid */
	unsigned int	tlbiel;			/* nonzero if this size can be invalidated with tlbiel -- presumably; verify against users */
	unsigned long	avpnm;			/* bits to mask out of the AVPN in the HPTE -- NOTE(review): confirm */
	union {
		unsigned long	sllp;		/* SLB L||LP encoding (hash translation) */
		unsigned long	ap;		/* AP encoding (radix translation) */
	};
};
/* Table of supported page sizes, filled in at boot from the device tree. */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
26
27#endif
28
29
30#include <asm/book3s/64/mmu-hash.h>
31
32#ifndef __ASSEMBLY__
33
34
35
/*
 * One radix process-table entry: two big-endian doublewords.
 * Layout of the fields is defined by the ISA -- see PowerISA 3.0,
 * not reproduced here.
 */
struct prtb_entry {
	__be64 prtb0;
	__be64 prtb1;
};
/* Base of the process table (one entry per PID). */
extern struct prtb_entry *process_tb;

/*
 * One partition-table entry: two big-endian doublewords.
 * Bit definitions for patb0/patb1 follow below (PATB_*, RPDB_*, ...).
 */
struct patb_entry {
	__be64 patb0;
	__be64 patb1;
};
/* Base of the partition table. */
extern struct patb_entry *partition_tb;
47
48
/* Bits in the first doubleword (patb0) of a partition table entry. */
#define PATB_HR		(1UL << 63)		/* host uses radix translation */
#define RPDB_MASK	0x0fffffffffffff00UL	/* radix page directory base */
#define RPDB_SHIFT	(1UL << 8)		/* NOTE(review): a *_SHIFT is normally a bit count (8), not a mask bit (1<<8); check users before relying on this */
#define RTS1_SHIFT	61			/* upper 2 bits of radix tree size */
#define RTS1_MASK	(3UL << RTS1_SHIFT)
#define RTS2_SHIFT	5			/* lower 3 bits of radix tree size */
#define RTS2_MASK	(7UL << RTS2_SHIFT)
#define RPDS_MASK	0x1f			/* radix page directory size field */

/* Bits in the second doubleword (patb1) of a partition table entry. */
#define PATB_GR		(1UL << 63)		/* guest uses radix -- presumably must match PATB_HR; verify */
#define PRTS_MASK	0x1f			/* process table size field */
#define PRTB_MASK	0x0ffffffffffff000UL	/* process table base */
62
63
/* Number of PID bits in use, discovered at boot. */
extern unsigned int mmu_pid_bits;

/* First usable PID -- presumably PIDs below this are reserved; verify against callers. */
extern unsigned int mmu_base_pid;

/*
 * Memory block size used with radix translation; set once during
 * early init (__ro_after_init) and read-only afterwards.
 */
extern unsigned long __ro_after_init radix_mem_block_size;

/* Process table: 1 << mmu_pid_bits entries of 16 bytes each (hence the "+ 4"). */
#define PRTB_SIZE_SHIFT	(mmu_pid_bits + 4)
#define PRTB_ENTRIES	(1ul << mmu_pid_bits)

/* log2 of the partition table size in bytes (64K). */
#define PATB_SIZE_SHIFT	16
81
typedef unsigned long mm_context_id_t;
struct spinlock;	/* forward declaration only; a pointer suffices here */

/* Maximum possible number of NPUs in a system. */
#define NV_MAX_NPUS 8

/*
 * Bitmap of address-space slices: 64 "low" slices tracked in a single
 * u64, plus SLICE_NUM_HIGH "high" slices in a bitmap. Slice sizes are
 * defined elsewhere (slice.h) -- TODO confirm 256MB low / 1TB high.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};
97
/*
 * Architecture-specific per-mm context.
 */
typedef struct {
	union {
		/*
		 * One context id per MAX_EA_BITS_PER_CONTEXT-sized range of
		 * the user address space (see get_ea_context()).
		 * extended_id[0] aliases 'id', the primary context id.
		 */
		mm_context_id_t id;
		mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
	};
	u16 user_psize;		/* base page size index (MMU_PAGE_*) -- presumably; verify */

	/* CPUs currently using this context; kept in sync with mm_cpumask
	 * by arch_clear_mm_cpumask_cpu() below. */
	atomic_t active_cpus;

	/* Number of coprocessor users attached to this context. */
	atomic_t copros;

	/* NPU state for this context -- NOTE(review): ownership/lifetime not visible here. */
	struct npu_context *npu_context;

#ifdef CONFIG_PPC_MM_SLICES
	/* Preferred page size per slice (packed, one nibble/byte per slice --
	 * exact packing defined by slice code; see struct slice_mask). */
	unsigned char low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;	/* upper bound of the usable user address space */
# ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
# endif
	struct slice_mask mask_4k;
# ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
# endif
#else
	u16 sllp;		/* SLB encoding used when slices are disabled */
#endif
	unsigned long vdso_base;	/* user address of the vDSO mapping */
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* sub-page protection state */
#endif
	/*
	 * Page-fragment allocator state: pointers into partially
	 * consumed pages used to carve out PTE/PMD tables.
	 */
	void *pte_frag;
	void *pmd_frag;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;	/* memory preregistered for TCE IOMMU */
#endif

#ifdef CONFIG_PPC_MEM_KEYS
	/*
	 * Each bit represents one protection key:
	 * bit set   -> key allocated,
	 * bit unset -> key free.
	 */
	u32 pkey_allocation_map;
	s16 execute_only_pkey;	/* key for execute-only mappings -- presumably -1 when none; verify */
#endif
} mm_context_t;
161
162
163
164
/* Page size indices (MMU_PAGE_*) chosen for the various kernel mappings. */
extern int mmu_linear_psize;	/* linear (direct) mapping */
extern int mmu_virtual_psize;	/* kernel virtual / base page size */
extern int mmu_vmalloc_psize;	/* vmalloc region */
extern int mmu_vmemmap_psize;	/* vmemmap region */
extern int mmu_io_psize;	/* I/O (ioremap) mappings */

/* MMU setup driven by the flattened device tree, hash/radix variants. */
void mmu_early_init_devtree(void);
void hash__early_init_devtree(void);
void radix__early_init_devtree(void);
extern void hash__early_init_mmu(void);
extern void radix__early_init_mmu(void);

/*
 * Boot-CPU MMU initialization: dispatch to the radix or hash backend
 * depending on the active translation mode.
 */
static inline void early_init_mmu(void)
{
	if (radix_enabled()) {
		radix__early_init_mmu();
		return;
	}
	hash__early_init_mmu();
}
extern void hash__early_init_mmu_secondary(void);
extern void radix__early_init_mmu_secondary(void);

/* Secondary-CPU MMU initialization: same radix/hash dispatch as early_init_mmu(). */
static inline void early_init_mmu_secondary(void)
{
	if (radix_enabled()) {
		radix__early_init_mmu_secondary();
		return;
	}
	hash__early_init_mmu_secondary();
}
191
192extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
193 phys_addr_t first_memblock_size);
194static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
195 phys_addr_t first_memblock_size)
196{
197
198
199
200
201
202 return hash__setup_initial_memory_limit(first_memblock_base,
203 first_memblock_size);
204}
205
#ifdef CONFIG_PPC_PSERIES
extern void radix_init_pseries(void);
#else
/*
 * No pseries hypervisor support built in: nothing to initialize.
 * (Dropped the stray ';' after the empty body -- an extra file-scope
 * semicolon is not valid ISO C before C23 and warns under -Wpedantic.)
 */
static inline void radix_init_pseries(void) { }
#endif
211
#ifdef CONFIG_HOTPLUG_CPU
/*
 * On CPU unplug, drop @cpu from @mm's cpumask and keep the
 * context.active_cpus count in step with the mask. Wrapped in
 * do { } while (0) so it expands safely as a single statement.
 */
#define arch_clear_mm_cpumask_cpu(cpu, mm)				\
	do {								\
		if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {		\
			atomic_dec(&(mm)->context.active_cpus);		\
			cpumask_clear_cpu(cpu, mm_cpumask(mm));		\
		}							\
	} while (0)

void cleanup_cpu_mmu_context(void);
#endif
223
224static inline int get_ea_context(mm_context_t *ctx, unsigned long ea)
225{
226 int index = ea >> MAX_EA_BITS_PER_CONTEXT;
227
228 if (likely(index < ARRAY_SIZE(ctx->extended_id)))
229 return ctx->extended_id[index];
230
231
232 WARN_ON(1);
233 return 0;
234}
235
236static inline unsigned long get_user_vsid(mm_context_t *ctx,
237 unsigned long ea, int ssize)
238{
239 unsigned long context = get_ea_context(ctx, ea);
240
241 return get_vsid(context, ea, ssize);
242}
243
244#endif
245#endif
246