1#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
2#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17#include <asm/bug.h>
18
19
20
21
22
23
24#include <asm/book3s/64/pgtable.h>
25#include <asm/bug.h>
26#include <asm/processor.h>
27#include <asm/cpu_has_feature.h>
28
29
30
31
32
/*
 * SLB (segment lookaside buffer)
 */

#define SLB_NUM_BOLTED 3
#define SLB_CACHE_ENTRIES 8
#define SLB_MIN_SIZE 32

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12
#define SLB_VSID_SHIFT_1T 24
#define SLB_VSID_SSIZE_SHIFT 62
#define SLB_VSID_B ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
#define SLB_VSID_N ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)
#define SLB_VSID_C ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C (0x08000000)
#define SLBIE_SSIZE_SHIFT 25
64
65
66
67
68
/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

/* Bits in the first doubleword of a hash PTE (see struct hash_pte below) */
#define HPTE_V_SSIZE_SHIFT 62
#define HPTE_V_AVPN_SHIFT 7
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)

/*
 * Bits in the second doubleword. ISA 3.0 moved the segment size (B field)
 * into this dword at bit 58 (see hpte_encode_r()/hpte_encode_avpn()).
 */
#define HPTE_R_3_0_SSIZE_SHIFT 58
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_PP ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP ASM_CONST(0x8000000000000003)
#define HPTE_R_N ASM_CONST(0x0000000000000004)
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
#define HPTE_R_I ASM_CONST(0x0000000000000020)
#define HPTE_R_W ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)
#define HPTE_R_R ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)

#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX 0 /* Supervisor read/write, User none */
#define PP_RWRX 1 /* Supervisor read/write, User read */
#define PP_RWRW 2 /* Supervisor read/write, User read/write */
#define PP_RXRX 3 /* Supervisor read,       User read */
#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK 0xc00 /* invalidation selector */
#define TLBIEL_INVAL_PAGE 0x000 /* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID 0x800 /* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET 0xc00 /* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK 0xfff000 /* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT 12

#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH 256 /* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX 128 /* # sets in POWER9 TLB Radix mode */
125
126#ifndef __ASSEMBLY__
127
/*
 * Dispatch table of hash-page-table primitives. Presumably each hash-MMU
 * platform backend fills in one of these (see the mmu_hash_ops global
 * below) — confirm against the .c files that assign it.
 */
struct mmu_hash_ops {
	void (*hpte_invalidate)(unsigned long slot,
				unsigned long vpn,
				int bpsize, int apsize,
				int ssize, int local);
	long (*hpte_updatepp)(unsigned long slot,
			      unsigned long newpp,
			      unsigned long vpn,
			      int bpsize, int apsize,
			      int ssize, unsigned long flags);
	void (*hpte_updateboltedpp)(unsigned long newpp,
				    unsigned long ea,
				    int psize, int ssize);
	long (*hpte_insert)(unsigned long hpte_group,
			    unsigned long vpn,
			    unsigned long prpn,
			    unsigned long rflags,
			    unsigned long vflags,
			    int psize, int apsize,
			    int ssize);
	long (*hpte_remove)(unsigned long hpte_group);
	int (*hpte_removebolted)(unsigned long ea,
				 int psize, int ssize);
	void (*flush_hash_range)(unsigned long number, int local);
	void (*hugepage_invalidate)(unsigned long vsid,
				    unsigned long addr,
				    unsigned char *hpte_slot_array,
				    int psize, int ssize, int local);
	/*
	 * Clear every entry in the hash page table.
	 * NOTE(review): callers/locking not visible here — looks like a
	 * kexec-style teardown hook; verify locking expectations at the
	 * implementation sites.
	 */
	void (*hpte_clear_all)(void);
};
165extern struct mmu_hash_ops mmu_hash_ops;
166
/* One hashed page table entry: two big-endian doublewords. */
struct hash_pte {
	__be64 v;	/* first dword: AVPN, flags (HPTE_V_* above) */
	__be64 r;	/* second dword: RPN, protection (HPTE_R_* above) */
};
171
172extern struct hash_pte *htab_address;
173extern unsigned long htab_size_bytes;
174extern unsigned long htab_hash_mask;
175
176
177static inline int shift_to_mmu_psize(unsigned int shift)
178{
179 int psize;
180
181 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
182 if (mmu_psize_defs[psize].shift == shift)
183 return psize;
184 return -1;
185}
186
187static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
188{
189 if (mmu_psize_defs[mmu_psize].shift)
190 return mmu_psize_defs[mmu_psize].shift;
191 BUG();
192}
193
194static inline unsigned long get_sllp_encoding(int psize)
195{
196 unsigned long sllp;
197
198 sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
199 ((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
200 return sllp;
201}
202
203#endif
204
205
206
207
208
209
210
/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1

/*
 * encode page number shift.
 * in order to fit the 78 bit va in a 64 bit variable we shift the va by
 * 12 bits. This enable us to address upto 76 bit va.
 * For hpt hash from a va we can ignore the page size bits of va and for
 * hpte encoding we ignore up to 23 bits of va. So ignoring lower 12 bits ensure
 * we work in all cases including 4k page size.
 */
#define VPN_SHIFT 12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT 12
#define LP_BITS 8
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
230
231#ifndef __ASSEMBLY__
232
233static inline int slb_vsid_shift(int ssize)
234{
235 if (ssize == MMU_SEGSIZE_256M)
236 return SLB_VSID_SHIFT;
237 return SLB_VSID_SHIFT_1T;
238}
239
240static inline int segment_shift(int ssize)
241{
242 if (ssize == MMU_SEGSIZE_256M)
243 return SID_SHIFT;
244 return SID_SHIFT_1T;
245}
246
247
248
249
250
251
252
253
254extern u8 hpte_page_sizes[1 << LP_BITS];
255
/*
 * Decode the page size of an HPTE from its two doublewords.
 * Returns the size in bytes: the actual page size when is_base_size is
 * false, the base page size when true; 0 if the LP encoding is unknown
 * (no entry in hpte_page_sizes[]).
 */
static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	/* Without HPTE_V_LARGE both sizes are the plain 4k page. */
	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	/*
	 * hpte_page_sizes[] packs two psize indices per entry: the low
	 * nibble is the base page size, the high nibble the actual size.
	 */
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}
273
/* Actual page size (bytes) encoded in an HPTE; 0 if unrecognised. */
static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}
278
/* Base page size (bytes) encoded in an HPTE; 0 if unrecognised. */
static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
283
284
285
286
287extern int mmu_kernel_ssize;
288extern int mmu_highuser_ssize;
289extern u16 mmu_slb_size;
290extern unsigned long tce_alloc_start, tce_alloc_end;
291
292
293
294
295
296
297
298extern int mmu_ci_restrictions;
299
300
301
302
303
304
/*
 * Build the AVPN half of the first HPTE doubleword from a VPN.
 * Before ISA 3.0 the segment size is also encoded here; on ISA 3.0
 * CPUs it is carried in the second dword instead (see hpte_encode_r()).
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78 bits VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
323
324
325
326
327
328static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
329 int actual_psize, int ssize)
330{
331 unsigned long v;
332 v = hpte_encode_avpn(vpn, base_psize, ssize);
333 if (actual_psize != MMU_PAGE_4K)
334 v |= HPTE_V_LARGE;
335 return v;
336}
337
338
339
340
341
342
343static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
344 int actual_psize, int ssize)
345{
346
347 if (cpu_has_feature(CPU_FTR_ARCH_300))
348 pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
349
350
351 if (actual_psize == MMU_PAGE_4K)
352 return pa & HPTE_R_RPN;
353 else {
354 unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
355 unsigned int shift = mmu_psize_defs[actual_psize].shift;
356 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
357 }
358}
359
360
361
362
363static inline unsigned long hpt_vpn(unsigned long ea,
364 unsigned long vsid, int ssize)
365{
366 unsigned long mask;
367 int s_shift = segment_shift(ssize);
368
369 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
370 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
371}
372
373
374
375
/*
 * This hashes a virtual address to a hash-table bucket index:
 * VSID XORed with the page number within the segment, truncated
 * to 39 bits. 1T segments first fold the VSID with itself.
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be atmost 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT)) ;
	}
	return hash & 0x7fffffffffUL;
}
395
/* Flags passed down through the hash fault path (flags argument below) */
#define HPTE_LOCAL_UPDATE 0x1
#define HPTE_NOHPTE_UPDATE 0x2

/* Per-page-size hash fault handlers, implemented in assembly/C elsewhere */
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	/* Must never be reached when THP is not configured. */
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

/* SLB management, implemented in mm/slb.c */
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
/*
 * VSID allocation (256MB segment):
 * A proto-VSID is built as (context << ESID_BITS) | ESID and then
 * scrambled with a multiplicative hash (see vsid_scramble() and
 * ASM_VSID_SCRAMBLE below): VSID = (proto-VSID * MULTIPLIER) % MODULUS.
 */
#define CONTEXT_BITS 19
#define ESID_BITS 18
#define ESID_BITS_1T 6

/*
 * The top contexts above MAX_USER_CONTEXT are reserved for kernel
 * mappings (get_kernel_vsid() derives them from the region id), hence
 * user contexts stop 5 short of 2^CONTEXT_BITS.
 */
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)

/*
 * This should be computed such that protovosid * vsid_multiplier
 * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
 */
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

/* Span of user virtual addresses covered by one context */
#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
/*
 * This macro generates asm code to compute the VSID scramble
 * function: (protovsid * VSID_MULTIPLIER) % VSID_MODULUS, where
 * the modulus is 2^VSID_BITS - 1 so the reduction can be done by
 * folding the high bits into the low bits instead of dividing.
 *
 *	rt = register containing the proto-VSID; the VSID is left in it
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- the answer ends up in the low VSID_BITS of rt; higher bits
 *	  may contain garbage and need masking by the caller
 *
 * NOTE: the macro body must remain one unbroken chain of '\'
 * line-continuations; a stray unescaped line here previously split the
 * macro and left bare instructions at file scope.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* fold high bits into low */	\
	/*								\
	 * The sum may itself reach 2^VSID_BITS; detect that by	adding	\
	 * 1 and shifting, then add the carry back in to complete the	\
	 * reduction mod 2^VSID_BITS - 1.				\
	 */								\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* rx = 1 if rt overflowed */	\
	add	rt,rt,rx		/* add 1 if overflow */
554
555
556#define SLICE_ARRAY_SIZE (H_PGTABLE_RANGE >> 41)
557
558#ifndef __ASSEMBLY__
559
#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these.  Basically we have a 3-level tree, with the top level being
 * the protptrs array.  To optimize speed and memory consumption when
 * none of the protections are set, the top level is a pointer to NULL
 * rather than an array of pointers to protection maps; the lower
 * levels are likewise only allocated when needed.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses below this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];	/* fast path for the lowest addresses */
};

/* Tree geometry: 4 bytes (one u32 of 4k-subpage bits) per page */
#define SBP_L1_BITS (PAGE_SHIFT - 2)
#define SBP_L2_BITS (PAGE_SHIFT - 3)
#define SBP_L1_COUNT (1 << SBP_L1_BITS)
#define SBP_L2_COUNT (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)

extern void subpage_prot_free(struct mm_struct *mm);
extern void subpage_prot_init_new_context(struct mm_struct *mm);
#else
/* No-op stubs when sub-page protection is not configured */
static inline void subpage_prot_free(struct mm_struct *mm) {}
static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
#endif /* CONFIG_PPC_SUBPAGE_PROT */
591
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with.  However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else /* 1 */
/* Fold the high bits back in instead of dividing: x mod (2^n - 1) */
#define vsid_scramble(protovsid, size) \
	({ \
		unsigned long x; \
		x = (protovsid) * VSID_MULTIPLIER_##size; \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif /* 1 */
611
612
613static inline int user_segment_size(unsigned long addr)
614{
615
616 if (addr >= (1UL << SID_SHIFT_1T))
617 return mmu_highuser_ssize;
618 return MMU_SEGSIZE_256M;
619}
620
/*
 * Compute the VSID for an effective address in the given context and
 * segment size by scrambling the proto-VSID (context || ESID).
 */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
636
637
638
639
640
641
642
643
644
645
/*
 * VSID for a kernel address. Only valid for the kernel regions
 * (ea >> 60 in 0xc..0xf): each region gets one of the context ids
 * above MAX_USER_CONTEXT, starting at MAX_USER_CONTEXT + 1 for 0xc.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
656
657unsigned htab_shift_for_mem_size(unsigned long mem_size);
658
659#endif
660
661#endif
662