1#ifndef _ASM_POWERPC_MMU_HASH64_H_
2#define _ASM_POWERPC_MMU_HASH64_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17
18
19
20
21
22
23#include <asm/pgtable-ppc64.h>
24#include <asm/bug.h>
25#include <asm/processor.h>
26
27
28
29
30
31#define STE_ESID_V 0x80
32#define STE_ESID_KS 0x20
33#define STE_ESID_KP 0x10
34#define STE_ESID_N 0x08
35
36#define STE_VSID_SHIFT 12
37
38
39#define STAB0_PAGE 0x8
40#define STAB0_OFFSET (STAB0_PAGE << 12)
41#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)
42
43#ifndef __ASSEMBLY__
44extern char initial_stab[];
45#endif
46
47
48
49
50
51#define SLB_NUM_BOLTED 3
52#define SLB_CACHE_ENTRIES 8
53#define SLB_MIN_SIZE 32
54
55
56#define SLB_ESID_V ASM_CONST(0x0000000008000000)
57
58
59#define SLB_VSID_SHIFT 12
60#define SLB_VSID_SHIFT_1T 24
61#define SLB_VSID_SSIZE_SHIFT 62
62#define SLB_VSID_B ASM_CONST(0xc000000000000000)
63#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
64#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
65#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
66#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
67#define SLB_VSID_N ASM_CONST(0x0000000000000200)
68#define SLB_VSID_L ASM_CONST(0x0000000000000100)
69#define SLB_VSID_C ASM_CONST(0x0000000000000080)
70#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
71#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
72#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
73#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
74#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
75#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
76
77#define SLB_VSID_KERNEL (SLB_VSID_KP)
78#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
79
80#define SLBIE_C (0x08000000)
81#define SLBIE_SSIZE_SHIFT 25
82
83
84
85
86
87#define HPTES_PER_GROUP 8
88
89#define HPTE_V_SSIZE_SHIFT 62
90#define HPTE_V_AVPN_SHIFT 7
91#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
92#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
93#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
94#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
95#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
96#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
97#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
98#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
99
100#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
101#define HPTE_R_TS ASM_CONST(0x4000000000000000)
102#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
103#define HPTE_R_RPN_SHIFT 12
104#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
105#define HPTE_R_PP ASM_CONST(0x0000000000000003)
106#define HPTE_R_N ASM_CONST(0x0000000000000004)
107#define HPTE_R_G ASM_CONST(0x0000000000000008)
108#define HPTE_R_M ASM_CONST(0x0000000000000010)
109#define HPTE_R_I ASM_CONST(0x0000000000000020)
110#define HPTE_R_W ASM_CONST(0x0000000000000040)
111#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
112#define HPTE_R_C ASM_CONST(0x0000000000000080)
113#define HPTE_R_R ASM_CONST(0x0000000000000100)
114#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
115
116#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
117#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
118
119
120#define PP_RWXX 0
121#define PP_RWRX 1
122#define PP_RWRW 2
123#define PP_RXRX 3
124#define PP_RXXX (HPTE_R_PP0 | 2)
125
126
127#define TLBIEL_INVAL_SEL_MASK 0xc00
128#define TLBIEL_INVAL_PAGE 0x000
129#define TLBIEL_INVAL_SET_LPID 0x800
130#define TLBIEL_INVAL_SET 0xc00
131#define TLBIEL_INVAL_SET_MASK 0xfff000
132#define TLBIEL_INVAL_SET_SHIFT 12
133
134#define POWER7_TLB_SETS 128
135
136#ifndef __ASSEMBLY__
137
/* One hashed page table entry (HPTE), stored big-endian in the hash table. */
struct hash_pte {
	__be64 v;	/* first dword: AVPN, segment size, valid/secondary bits (HPTE_V_*) */
	__be64 r;	/* second dword: real page number, PP/WIMG/key bits (HPTE_R_*) */
};
142
143extern struct hash_pte *htab_address;
144extern unsigned long htab_size_bytes;
145extern unsigned long htab_hash_mask;
146
147
148
149
150
151
152
153
154
155
/* Per page-size MMU parameters, indexed by MMU_PAGE_* in mmu_psize_defs[]. */
struct mmu_psize_def
{
	unsigned int shift;		/* log2 of the page size; 0 => size not supported */
	int penc[MMU_PAGE_COUNT];	/* HPTE LP encoding, per actual page size */
	unsigned int tlbiel;		/* tlbiel instruction supported for this size */
	unsigned long avpnm;		/* bits to mask out of the AVPN in the HPTE */
	unsigned long sllp;		/* SLB L||LP bits (ready to OR into an slbmte) */
};
164extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
165
166static inline int shift_to_mmu_psize(unsigned int shift)
167{
168 int psize;
169
170 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
171 if (mmu_psize_defs[psize].shift == shift)
172 return psize;
173 return -1;
174}
175
176static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
177{
178 if (mmu_psize_defs[mmu_psize].shift)
179 return mmu_psize_defs[mmu_psize].shift;
180 BUG();
181}
182
183#endif
184
185
186
187
188
189
190
191#define MMU_SEGSIZE_256M 0
192#define MMU_SEGSIZE_1T 1
193
194
195
196
197
198
199
200
201
202#define VPN_SHIFT 12
203
204
205
206
207#define LP_SHIFT 12
208#define LP_BITS 8
209#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
210
211#ifndef __ASSEMBLY__
212
213static inline int segment_shift(int ssize)
214{
215 if (ssize == MMU_SEGSIZE_256M)
216 return SID_SHIFT;
217 return SID_SHIFT_1T;
218}
219
220
221
222
223extern int mmu_linear_psize;
224extern int mmu_virtual_psize;
225extern int mmu_vmalloc_psize;
226extern int mmu_vmemmap_psize;
227extern int mmu_io_psize;
228extern int mmu_kernel_ssize;
229extern int mmu_highuser_ssize;
230extern u16 mmu_slb_size;
231extern unsigned long tce_alloc_start, tce_alloc_end;
232
233
234
235
236
237
238
239extern int mmu_ci_restrictions;
240
241
242
243
244
245
/*
 * Build the AVPN (abbreviated virtual page number) portion of the first
 * HPTE doubleword, including the segment-size field, for the given base
 * page size @psize and segment size @ssize.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;

	/*
	 * The AVA field omits the low-order 23 bits of the virtual
	 * address: these bits are not needed in the PTE because the
	 * low-order b of them are part of the byte offset into the
	 * virtual page and, if b < 23, the high-order 23-b of them
	 * are always used in selecting the PTEGs to be searched.
	 * mmu_psize_defs[].avpnm then masks off the bits covered by
	 * the (large) page offset.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
263
264
265
266
267
268static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
269 int actual_psize, int ssize)
270{
271 unsigned long v;
272 v = hpte_encode_avpn(vpn, base_psize, ssize);
273 if (actual_psize != MMU_PAGE_4K)
274 v |= HPTE_V_LARGE;
275 return v;
276}
277
278
279
280
281
282
283static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
284 int actual_psize)
285{
286
287 if (actual_psize == MMU_PAGE_4K)
288 return pa & HPTE_R_RPN;
289 else {
290 unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
291 unsigned int shift = mmu_psize_defs[actual_psize].shift;
292 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
293 }
294}
295
296
297
298
299static inline unsigned long hpt_vpn(unsigned long ea,
300 unsigned long vsid, int ssize)
301{
302 unsigned long mask;
303 int s_shift = segment_shift(ssize);
304
305 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
306 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
307}
308
309
310
311
312static inline unsigned long hpt_hash(unsigned long vpn,
313 unsigned int shift, int ssize)
314{
315 int mask;
316 unsigned long hash, vsid;
317
318
319 if (ssize == MMU_SEGSIZE_256M) {
320 mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
321 hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
322 ((vpn & mask) >> (shift - VPN_SHIFT));
323 } else {
324 mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
325 vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
326 hash = vsid ^ (vsid << 25) ^
327 ((vpn & mask) >> (shift - VPN_SHIFT)) ;
328 }
329 return hash & 0x7fffffffffUL;
330}
331
332extern int __hash_page_4K(unsigned long ea, unsigned long access,
333 unsigned long vsid, pte_t *ptep, unsigned long trap,
334 unsigned int local, int ssize, int subpage_prot);
335extern int __hash_page_64K(unsigned long ea, unsigned long access,
336 unsigned long vsid, pte_t *ptep, unsigned long trap,
337 unsigned int local, int ssize);
338struct mm_struct;
339unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
340extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
341int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
342 pte_t *ptep, unsigned long trap, int local, int ssize,
343 unsigned int shift, unsigned int mmu_psize);
344#ifdef CONFIG_TRANSPARENT_HUGEPAGE
345extern int __hash_page_thp(unsigned long ea, unsigned long access,
346 unsigned long vsid, pmd_t *pmdp, unsigned long trap,
347 int local, int ssize, unsigned int psize);
348#else
/*
 * Stub for !CONFIG_TRANSPARENT_HUGEPAGE builds: a THP hash fault can
 * never happen when THP is compiled out, so reaching this is a bug.
 */
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, int local,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
357#endif
358extern void hash_failure_debug(unsigned long ea, unsigned long access,
359 unsigned long vsid, unsigned long trap,
360 int ssize, int psize, int lpsize,
361 unsigned long pte);
362extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
363 unsigned long pstart, unsigned long prot,
364 int psize, int ssize);
365extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
366extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
367
368extern void hpte_init_native(void);
369extern void hpte_init_lpar(void);
370extern void hpte_init_beat(void);
371extern void hpte_init_beat_v3(void);
372
373extern void stabs_alloc(void);
374extern void slb_initialize(void);
375extern void slb_flush_and_rebolt(void);
376extern void stab_initialize(unsigned long stab);
377
378extern void slb_vmalloc_update(void);
379extern void slb_set_size(u16 size);
380#endif
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419#define CONTEXT_BITS 19
420#define ESID_BITS 18
421#define ESID_BITS_1T 6
422
423
424
425
426
427
428
429
430
431#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
432
433
434
435
436
437#define VSID_MULTIPLIER_256M ASM_CONST(12538073)
438#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
439#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
440
441#define VSID_MULTIPLIER_1T ASM_CONST(12538073)
442#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
443#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
444
445
446#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
/*
 * Assembly version of the VSID scramble: rt = protovsid on entry,
 * rt = scrambled VSID on exit; rx is clobbered as scratch.
 *
 * NOTE: the previous form of this macro was split in two by blank
 * lines in the middle of its backslash-continuation chain, which
 * terminated the #define early and left the trailing addi/srdi/add
 * as stray top-level tokens.  It must be one contiguous chain.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/*								\
	 * rt is now VSID mod 2^VSID_BITS-1, except it may still be	\
	 * >= the modulus; if rt+1 has the 2^VSID_BITS bit set, the	\
	 * answer is the low VSID_BITS of rt+1, otherwise rt already	\
	 * is the answer.  Either way it equals the low VSID_BITS of	\
	 * rt + ((rt+1) >> VSID_BITS).					\
	 */								\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx
480
481
482#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
483
484#ifndef __ASSEMBLY__
485
486#ifdef CONFIG_PPC_SUBPAGE_PROT
487
488
489
490
491
492
493
494
495
496
497
/*
 * Per-mm sub-page protection state: a multi-level table of protection
 * words covering user addresses (granularity per the SBP_* shifts below).
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses below this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];	/* top level, one slot per 8T */
	unsigned int *low_prot[4];	/* shortcut level for the lowest addresses */
};
503
504#define SBP_L1_BITS (PAGE_SHIFT - 2)
505#define SBP_L2_BITS (PAGE_SHIFT - 3)
506#define SBP_L1_COUNT (1 << SBP_L1_BITS)
507#define SBP_L2_COUNT (1 << SBP_L2_BITS)
508#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
509#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
510
511extern void subpage_prot_free(struct mm_struct *mm);
512extern void subpage_prot_init_new_context(struct mm_struct *mm);
513#else
/* No-op stubs when CONFIG_PPC_SUBPAGE_PROT is disabled. */
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
516#endif
517
518typedef unsigned long mm_context_id_t;
519struct spinlock;
520
/* Per-mm MMU context for the 64-bit hash MMU. */
typedef struct {
	mm_context_id_t id;	/* context number feeding the VSID computation */
	u16 user_psize;		/* base page size index for user mappings */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* page-size index per low address slice */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];	/* ditto, high slices */
#else
	u16 sllp;		/* SLB L||LP encoding when slices are disabled */
#endif
	unsigned long vdso_base;	/* user address the vDSO is mapped at */
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* sub-page protection state */
#endif
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guards acop and cop_pid */
	unsigned long acop;		/* mask of enabled coprocessor types */
	unsigned int cop_pid;		/* pid value used with coprocessors */
#endif
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
} mm_context_t;
545
546
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called with.
 * However gcc is not clever enough to compute the modulus
 * (2^n-1) without a second multiply, so the open-coded variant
 * in the #else branch is used instead.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* multiply-free reduction mod 2^VSID_BITS - 1 */
#define vsid_scramble(protovsid, size) \
	({ \
		unsigned long x; \
		x = (protovsid) * VSID_MULTIPLIER_##size; \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif
566
567
568static inline int user_segment_size(unsigned long addr)
569{
570
571 if (addr >= (1UL << SID_SHIFT_1T))
572 return mmu_highuser_ssize;
573 return MMU_SEGSIZE_256M;
574}
575
/*
 * Compute the VSID for effective address @ea in context @context with
 * segment size @ssize, by scrambling the (context, ESID) pair.
 */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address: outside the pagetable range for its region.
	 * We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
591
592
593
594
595
596
597
598
599
600
/*
 * Compute the VSID for a kernel effective address.  Only valid for
 * addresses in the kernel regions (top nibble 0xc..0xf) — the ea >> 60
 * arithmetic below assumes that.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the contexts just above the user range:
	 * region 0xc maps to MAX_USER_CONTEXT + 1, 0xd to +2, and so on.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
611#endif
612
613#endif
614