1#ifndef _ASM_POWERPC_MMU_HASH64_H_
2#define _ASM_POWERPC_MMU_HASH64_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17
18
19
20
21
/*
 * Segment table entry (STE) definitions.
 *
 * The K{S,P} and N flags mirror the same-named SLB_VSID_* bits below:
 * supervisor-state key, problem-state key and no-execute.
 */
#define STE_ESID_V 0x80		/* entry is valid */
#define STE_ESID_KS 0x20	/* supervisor (kernel) state storage key */
#define STE_ESID_KP 0x10	/* problem (user) state storage key */
#define STE_ESID_N 0x08		/* no-execute segment */

#define STE_VSID_SHIFT 12	/* position of the VSID in the STE VSID word */

/* Location of cpu0's initial segment table within the kernel image */
#define STAB0_PAGE 0x6
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
/* The initial segment table page (defined elsewhere; lives at STAB0_OFFSET) */
extern char initial_stab[];
#endif
37
38
39
40
41
/*
 * SLB (Segment Lookaside Buffer)
 */
#define SLB_NUM_BOLTED 3	/* entries kept permanently bolted in the SLB */
#define SLB_CACHE_ENTRIES 8	/* size of the per-thread SLB cache */
#define SLB_MIN_SIZE 32		/* smallest SLB entry count we accept */

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* entry is valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12	/* VSID position for 256M segments */
#define SLB_VSID_SHIFT_1T 24	/* VSID position for 1T segments */
#define SLB_VSID_SSIZE_SHIFT 62	/* position of the segment-size (B) field */
#define SLB_VSID_B ASM_CONST(0xc000000000000000)	/* segment-size field mask */
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)	/* 256MB segment */
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)	/* 1TB segment */
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)	/* supervisor-state key */
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)	/* problem-state key */
#define SLB_VSID_N ASM_CONST(0x0000000000000200)	/* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)	/* large page */
#define SLB_VSID_C ASM_CONST(0x0000000000000080)	/* class */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)	/* large-page selector */
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)	/* combined L || LP page-size field */

/* Kernel mappings use KP only; user mappings KP|KS plus the class bit */
#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

/* slbie instruction fields */
#define SLBIE_C (0x08000000)	/* class bit */
#define SLBIE_SSIZE_SHIFT 25	/* segment-size field position */
73
74
75
76
77
/*
 * Hashed page table (HPT)
 */
#define HPTES_PER_GROUP 8	/* HPTEs per hash bucket (PTEG) */

/* Bits in the first ("v") doubleword of a hash PTE */
#define HPTE_V_SSIZE_SHIFT 62	/* segment-size field position */
#define HPTE_V_AVPN_SHIFT 7	/* abbreviated virtual page number position */
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
/* Match two v-words ignoring the low (flag) bits */
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)	/* software: entry is bolted */
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)	/* software: entry is locked */
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)	/* page is larger than 4K */
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)	/* entry is in secondary group */
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)	/* entry is valid */

/* Bits in the second ("r") doubleword of a hash PTE */
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)	/* high page-protection bit */
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)	/* real page number */
#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)		/* page protection */
#define HPTE_R_N ASM_CONST(0x0000000000000004)		/* no-execute */
#define HPTE_R_C ASM_CONST(0x0000000000000080)		/* changed (dirty) */
#define HPTE_R_R ASM_CONST(0x0000000000000100)		/* referenced */

#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)	/* 1TB-segment entry */
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)	/* VRMA comparison mask (assumed — verify) */

/* Values for PP bits, assuming Ks=0, Kp=1 (see SLB_VSID_KERNEL/USER) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read, User read */
110
111#ifndef __ASSEMBLY__
112
/*
 * One hashed-page-table entry: two 64-bit words, "v" (valid/AVPN/flags,
 * see HPTE_V_*) and "r" (real page number/protection, see HPTE_R_*).
 */
struct hash_pte {
	unsigned long v;
	unsigned long r;
};

extern struct hash_pte *htab_address;	/* base address of the hash table */
extern unsigned long htab_size_bytes;	/* total hash table size in bytes */
extern unsigned long htab_hash_mask;	/* mask applied to hash values (presumably buckets-1 — verify) */
121
122
123
124
125
126
127
128
129
130
/*
 * Per-page-size parameters for the hash MMU, indexed by MMU_PAGE_*.
 */
struct mmu_psize_def
{
	unsigned int shift;	/* page size is 1 << shift bytes */
	unsigned int penc;	/* HPTE encoding of this page size (see hpte_encode_r()) */
	unsigned int tlbiel;	/* nonzero if tlbiel works for this size (assumed — verify) */
	unsigned long avpnm;	/* AVPN bits masked off in hpte_encode_v() */
	unsigned long sllp;	/* SLB L||LP encoding for this size (assumed — verify) */
};
139
140#endif
141
142
143
144
145
146
147
/* Segment sizes: 256MB and 1TB segments */
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1
150
151
152#ifndef __ASSEMBLY__
153
154
155
156
/* Page-size descriptors, indexed by MMU_PAGE_* (see struct mmu_psize_def) */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
extern int mmu_linear_psize;	/* page size index for the linear mapping */
extern int mmu_virtual_psize;	/* default page size index */
extern int mmu_vmalloc_psize;	/* page size index for vmalloc space */
extern int mmu_vmemmap_psize;	/* page size index for vmemmap space */
extern int mmu_io_psize;	/* page size index for I/O mappings */
extern int mmu_kernel_ssize;	/* segment size (MMU_SEGSIZE_*) for kernel addresses */
extern int mmu_highuser_ssize;	/* segment size for high user addresses (see user_segment_size()) */
extern u16 mmu_slb_size;	/* number of SLB entries (see slb_set_size()) */
extern unsigned long tce_alloc_start, tce_alloc_end;	/* TCE allocation range (assumed — verify) */

/* Nonzero if cache-inhibited large pages are restricted (assumed — verify) */
extern int mmu_ci_restrictions;
175
176
177
178
179
180static inline unsigned long hpte_encode_v(unsigned long va, int psize,
181 int ssize)
182{
183 unsigned long v;
184 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
185 v <<= HPTE_V_AVPN_SHIFT;
186 if (psize != MMU_PAGE_4K)
187 v |= HPTE_V_LARGE;
188 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
189 return v;
190}
191
192
193
194
195
196
197static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
198{
199 unsigned long r;
200
201
202 if (psize == MMU_PAGE_4K)
203 return pa & HPTE_R_RPN;
204 else {
205 unsigned int penc = mmu_psize_defs[psize].penc;
206 unsigned int shift = mmu_psize_defs[psize].shift;
207 return (pa & ~((1ul << shift) - 1)) | (penc << 12);
208 }
209 return r;
210}
211
212
213
214
215static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
216 int ssize)
217{
218 if (ssize == MMU_SEGSIZE_256M)
219 return (vsid << 28) | (ea & 0xfffffffUL);
220 return (vsid << 40) | (ea & 0xffffffffffUL);
221}
222
223
224
225
226
227static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
228 int ssize)
229{
230 unsigned long hash, vsid;
231
232 if (ssize == MMU_SEGSIZE_256M) {
233 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
234 } else {
235 vsid = va >> 40;
236 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
237 }
238 return hash & 0x7fffffffffUL;
239}
240
/* Low-level hash-fault handlers, one per base page size */
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize);
struct mm_struct;
/* Icache maintenance on faults (presumably lazy flush — verify); returns pp */
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
/* Generic hash-fault entry point */
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
/* Huge-page variant of the hash-fault handler */
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize);
/* Diagnostic dump when inserting a hash entry fails */
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, unsigned long pte);
/* Create bolted kernel hash mappings for the range [vstart, vend) */
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
/* Register early-reserved gigantic pages (assumed — verify) */
extern void add_gpage(unsigned long addr, unsigned long page_size,
		      unsigned long number_of_pages);
/* Demote a user segment to 4K base pages */
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

/* Platform hash-table backend initialisers */
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

/* Segment-table (pre-SLB cpus) and SLB management */
extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
276#endif
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
/*
 * VSID scrambling constants.
 *
 * A VSID is computed from a proto-VSID as
 *	vsid = (proto_vsid * VSID_MULTIPLIER) % VSID_MODULUS
 * where the modulus is 2^VSID_BITS - 1, which lets the reduction be
 * done with shifts and adds instead of a divide (see vsid_scramble()
 * and ASM_VSID_SCRAMBLE).  The multipliers are presumably chosen to
 * give a good spread mod the modulus — verify against the original
 * derivation before changing.
 */
#define VSID_MULTIPLIER_256M ASM_CONST(200730139)	/* multiplier for 256M segments */
#define VSID_BITS_256M 36
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073)	/* multiplier for 1T segments */
#define VSID_BITS_1T 24
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

#define CONTEXT_BITS 19		/* bits of MMU context id */
#define USER_ESID_BITS 16	/* ESID bits folded into a 256M proto-VSID (see get_vsid()) */
#define USER_ESID_BITS_1T 4	/* ESID bits folded into a 1T proto-VSID */

/* Span of user address space one context covers with 256M segments */
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
/*
 * Assembly version of the VSID scramble: computes
 *	rt = (rt * VSID_MULTIPLIER_<size>) % VSID_MODULUS_<size>
 * clobbering rx.  Because the modulus is 2^VSID_BITS - 1, the high
 * bits of the product can simply be added back into the low bits,
 * followed by a final carry fold — the same trick as the C
 * vsid_scramble() below.
 *
 * Note: every line, including comment lines, must end in '\' so the
 * macro remains a single preprocessor definition.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/* rt is now congruent to the VSID mod 2^VSID_BITS - 1 but	\
	 * may still exceed the modulus; adding the carry out of	\
	 * (rt + 1) >> VSID_BITS reduces it fully in all cases.		\
	 */								\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* get the carry out */		\
	add	rt,rt,rx
374
375
376#ifndef __ASSEMBLY__
377
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * Per-mm sub-page protection state (embedded in mm_context_t below):
 * a multi-level table of 32-bit protection words.  low_prot caches
 * pointers to the first pages of protection words (low addresses);
 * protptrs holds the upper levels for higher addresses; maxaddr is
 * the limit below which protections have been set.  The level
 * fan-outs are derived from PAGE_SHIFT via the SBP_* constants below.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses below this are covered */
	unsigned int **protptrs[2];
	unsigned int *low_prot[4];
};

/* Table geometry, as a function of the base page size */
#define SBP_L1_BITS (PAGE_SHIFT - 2)	/* 4-byte protection words per page */
#define SBP_L2_BITS (PAGE_SHIFT - 3)	/* 8-byte pointers per page */
#define SBP_L1_COUNT (1 << SBP_L1_BITS)
#define SBP_L2_COUNT (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
/* Sub-page protection disabled: empty stubs so callers need no #ifdefs */
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif
409
/* MMU context number, allocated per mm (feeds get_vsid() — see below) */
typedef unsigned long mm_context_id_t;

/* Hash-MMU per-mm context */
typedef struct {
	mm_context_id_t id;	/* context number for this address space */
	u16 user_psize;		/* page size index for user mappings */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* per-slice page-size indices, low addresses (packing assumed — verify) */
	u64 high_slices_psize;	/* per-slice page-size indices, high addresses */
#else
	u16 sllp;		/* SLB L||LP encoding for user_psize */
#endif
	unsigned long vdso_base;	/* user address the vDSO is mapped at */
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* sub-page protection state */
#endif
} mm_context_t;
427
428
#if 0
/*
 * Reference definition of the VSID scramble: multiply and reduce mod
 * 2^VSID_BITS - 1.  Kept for documentation; presumably disabled in
 * favour of the divide-free version below — verify before enabling.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else
/*
 * Equivalent divide-free form: since the modulus is 2^VSID_BITS - 1,
 * the high bits of the product are added back into the low bits, with
 * one final carry fold (same trick as ASM_VSID_SCRAMBLE above).
 * Uses a GCC statement expression.
 */
#define vsid_scramble(protovsid, size) \
	({ \
	 unsigned long x; \
	 x = (protovsid) * VSID_MULTIPLIER_##size; \
	 x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
	 (x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif
448
449
450static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
451{
452 if (ssize == MMU_SEGSIZE_256M)
453 return vsid_scramble(ea >> SID_SHIFT, 256M);
454 return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
455}
456
457
458static inline int user_segment_size(unsigned long addr)
459{
460
461 if (addr >= (1UL << SID_SHIFT_1T))
462 return mmu_highuser_ssize;
463 return MMU_SEGSIZE_256M;
464}
465
466
467static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
468 int ssize)
469{
470 if (ssize == MMU_SEGSIZE_256M)
471 return vsid_scramble((context << USER_ESID_BITS)
472 | (ea >> SID_SHIFT), 256M);
473 return vsid_scramble((context << USER_ESID_BITS_1T)
474 | (ea >> SID_SHIFT_1T), 1T);
475}
476
477
478
479
480
/*
 * 256M-segment VSID scramble as a plain expression (true modulo, not
 * the divide-free fold used by vsid_scramble()); KERNEL_VSID builds
 * the kernel VSID straight from the EA's ESID.
 */
#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
	VSID_MODULUS_256M)
#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
484
485#endif
486
487#endif
488