1#ifndef _ASM_POWERPC_MMU_HASH64_H_
2#define _ASM_POWERPC_MMU_HASH64_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17#include <asm/bug.h>
18
19
20
21
22
23
24#include <asm/book3s/64/pgtable.h>
25#include <asm/bug.h>
26#include <asm/processor.h>
27
28
29
30
31
32#define SLB_NUM_BOLTED 3
33#define SLB_CACHE_ENTRIES 8
34#define SLB_MIN_SIZE 32
35
36
37#define SLB_ESID_V ASM_CONST(0x0000000008000000)
38
39
40#define SLB_VSID_SHIFT 12
41#define SLB_VSID_SHIFT_1T 24
42#define SLB_VSID_SSIZE_SHIFT 62
43#define SLB_VSID_B ASM_CONST(0xc000000000000000)
44#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
45#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
46#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
47#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
48#define SLB_VSID_N ASM_CONST(0x0000000000000200)
49#define SLB_VSID_L ASM_CONST(0x0000000000000100)
50#define SLB_VSID_C ASM_CONST(0x0000000000000080)
51#define SLB_VSID_LP ASM_CONST(0x0000000000000030)
52#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
53#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
54#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
55#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
56#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)
57
58#define SLB_VSID_KERNEL (SLB_VSID_KP)
59#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)
60
61#define SLBIE_C (0x08000000)
62#define SLBIE_SSIZE_SHIFT 25
63
64
65
66
67
68#define HPTES_PER_GROUP 8
69
70#define HPTE_V_SSIZE_SHIFT 62
71#define HPTE_V_AVPN_SHIFT 7
72#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
73#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
74#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
75#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)
76#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)
77#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)
78#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)
79#define HPTE_V_VALID ASM_CONST(0x0000000000000001)
80
81#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)
82#define HPTE_R_TS ASM_CONST(0x4000000000000000)
83#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
84#define HPTE_R_RPN_SHIFT 12
85#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)
86#define HPTE_R_PP ASM_CONST(0x0000000000000003)
87#define HPTE_R_N ASM_CONST(0x0000000000000004)
88#define HPTE_R_G ASM_CONST(0x0000000000000008)
89#define HPTE_R_M ASM_CONST(0x0000000000000010)
90#define HPTE_R_I ASM_CONST(0x0000000000000020)
91#define HPTE_R_W ASM_CONST(0x0000000000000040)
92#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
93#define HPTE_R_C ASM_CONST(0x0000000000000080)
94#define HPTE_R_R ASM_CONST(0x0000000000000100)
95#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)
96
97#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
98#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
99
100
101#define PP_RWXX 0
102#define PP_RWRX 1
103#define PP_RWRW 2
104#define PP_RXRX 3
105#define PP_RXXX (HPTE_R_PP0 | 2)
106
107
108#define TLBIEL_INVAL_SEL_MASK 0xc00
109#define TLBIEL_INVAL_PAGE 0x000
110#define TLBIEL_INVAL_SET_LPID 0x800
111#define TLBIEL_INVAL_SET 0xc00
112#define TLBIEL_INVAL_SET_MASK 0xfff000
113#define TLBIEL_INVAL_SET_SHIFT 12
114
115#define POWER7_TLB_SETS 128
116#define POWER8_TLB_SETS 512
117#define POWER9_TLB_SETS_HASH 256
118
119#ifndef __ASSEMBLY__
120
/* One hashed-page-table entry: two doublewords, stored big-endian. */
struct hash_pte {
	__be64 v;	/* AVPN, segment size, valid bit (HPTE_V_* fields) */
	__be64 r;	/* real page number, WIMG, protection (HPTE_R_* fields) */
};
125
126extern struct hash_pte *htab_address;
127extern unsigned long htab_size_bytes;
128extern unsigned long htab_hash_mask;
129
130
131
132
133
134
135
136
137
138
/*
 * Parameters for one supported hash-MMU page size, indexed by the
 * MMU_PAGE_* constants via mmu_psize_defs[] below.
 */
struct mmu_psize_def
{
	unsigned int shift;		/* number of bits (log2 of page size) */
	int penc[MMU_PAGE_COUNT];	/* HPTE encoding per actual page size */
	unsigned int tlbiel;		/* tlbiel supported for that page size */
	unsigned long avpnm;		/* bits to mask out in AVPN in the HPTE */
	unsigned long sllp;		/* SLB L||LP encoding (see SLB_VSID_L/LP) */
};
147extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
148
149static inline int shift_to_mmu_psize(unsigned int shift)
150{
151 int psize;
152
153 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
154 if (mmu_psize_defs[psize].shift == shift)
155 return psize;
156 return -1;
157}
158
159static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
160{
161 if (mmu_psize_defs[mmu_psize].shift)
162 return mmu_psize_defs[mmu_psize].shift;
163 BUG();
164}
165
166#endif
167
168
169
170
171
172
173
174#define MMU_SEGSIZE_256M 0
175#define MMU_SEGSIZE_1T 1
176
177
178
179
180
181
182
183
184
185#define VPN_SHIFT 12
186
187
188
189
190#define LP_SHIFT 12
191#define LP_BITS 8
192#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
193
194#ifndef __ASSEMBLY__
195
196static inline int slb_vsid_shift(int ssize)
197{
198 if (ssize == MMU_SEGSIZE_256M)
199 return SLB_VSID_SHIFT;
200 return SLB_VSID_SHIFT_1T;
201}
202
203static inline int segment_shift(int ssize)
204{
205 if (ssize == MMU_SEGSIZE_256M)
206 return SID_SHIFT;
207 return SID_SHIFT_1T;
208}
209
210
211
212
213extern int mmu_linear_psize;
214extern int mmu_virtual_psize;
215extern int mmu_vmalloc_psize;
216extern int mmu_vmemmap_psize;
217extern int mmu_io_psize;
218extern int mmu_kernel_ssize;
219extern int mmu_highuser_ssize;
220extern u16 mmu_slb_size;
221extern unsigned long tce_alloc_start, tce_alloc_end;
222
223
224
225
226
227
228
229extern int mmu_ci_restrictions;
230
231
232
233
234
235
/*
 * Build the AVPN (abbreviated virtual page number) + segment-size part
 * of the first HPTE doubleword for virtual page number @vpn.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the HPTE, because the low-order
	 * b of those bits are part of the byte offset into the virtual
	 * page and, if b < 23, the high-order 23-b of them are always
	 * used in selecting the PTEGs to be searched.  Bits covered by
	 * avpnm for this page size must also be masked out.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
253
254
255
256
257
258static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
259 int actual_psize, int ssize)
260{
261 unsigned long v;
262 v = hpte_encode_avpn(vpn, base_psize, ssize);
263 if (actual_psize != MMU_PAGE_4K)
264 v |= HPTE_V_LARGE;
265 return v;
266}
267
268
269
270
271
272
273static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
274 int actual_psize)
275{
276
277 if (actual_psize == MMU_PAGE_4K)
278 return pa & HPTE_R_RPN;
279 else {
280 unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
281 unsigned int shift = mmu_psize_defs[actual_psize].shift;
282 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
283 }
284}
285
286
287
288
289static inline unsigned long hpt_vpn(unsigned long ea,
290 unsigned long vsid, int ssize)
291{
292 unsigned long mask;
293 int s_shift = segment_shift(ssize);
294
295 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
296 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
297}
298
299
300
301
/*
 * Hash a virtual page number (VA >> VPN_SHIFT) into a hashed-page-table
 * bucket index, using the architected per-segment-size hash function.
 * @shift is the page-size shift of the translation being hashed.
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT (12) never exceeds the page-size shift here */
	if (ssize == MMU_SEGSIZE_256M) {
		/* 256M segment: hash = VSID ^ page index within segment */
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		/* 1T segment: hash = VSID ^ (VSID << 25) ^ page index */
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT)) ;
	}
	/* the hash value is truncated to 39 bits */
	return hash & 0x7fffffffffUL;
}
321
322#define HPTE_LOCAL_UPDATE 0x1
323#define HPTE_NOHPTE_UPDATE 0x2
324
325extern int __hash_page_4K(unsigned long ea, unsigned long access,
326 unsigned long vsid, pte_t *ptep, unsigned long trap,
327 unsigned long flags, int ssize, int subpage_prot);
328extern int __hash_page_64K(unsigned long ea, unsigned long access,
329 unsigned long vsid, pte_t *ptep, unsigned long trap,
330 unsigned long flags, int ssize);
331struct mm_struct;
332unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
333extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
334 unsigned long access, unsigned long trap,
335 unsigned long flags);
336extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
337 unsigned long dsisr);
338int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
339 pte_t *ptep, unsigned long trap, unsigned long flags,
340 int ssize, unsigned int shift, unsigned int mmu_psize);
341#ifdef CONFIG_TRANSPARENT_HUGEPAGE
342extern int __hash_page_thp(unsigned long ea, unsigned long access,
343 unsigned long vsid, pmd_t *pmdp, unsigned long trap,
344 unsigned long flags, int ssize, unsigned int psize);
345#else
/*
 * Stub used when CONFIG_TRANSPARENT_HUGEPAGE is disabled: no THP
 * fault should ever reach here, so this is a hard BUG.
 */
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;	/* not reached; keeps the signature's contract */
}
354#endif
355extern void hash_failure_debug(unsigned long ea, unsigned long access,
356 unsigned long vsid, unsigned long trap,
357 int ssize, int psize, int lpsize,
358 unsigned long pte);
359extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
360 unsigned long pstart, unsigned long prot,
361 int psize, int ssize);
362int htab_remove_mapping(unsigned long vstart, unsigned long vend,
363 int psize, int ssize);
364extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
365extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
366
367extern void hpte_init_native(void);
368extern void hpte_init_lpar(void);
369extern void hpte_init_beat(void);
370extern void hpte_init_beat_v3(void);
371
372extern void slb_initialize(void);
373extern void slb_flush_and_rebolt(void);
374
375extern void slb_vmalloc_update(void);
376extern void slb_set_size(u16 size);
377#endif
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416#define CONTEXT_BITS 19
417#define ESID_BITS 18
418#define ESID_BITS_1T 6
419
420
421
422
423
424
425
426
427
428#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
429
430
431
432
433
434#define VSID_MULTIPLIER_256M ASM_CONST(12538073)
435#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
436#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
437
438#define VSID_MULTIPLIER_1T ASM_CONST(12538073)
439#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
440#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
441
442
443#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
/*
 * This macro generates asm code to compute the VSID scramble
 * function:  (protovsid * VSID_MULTIPLIER) % VSID_MODULUS, where the
 * modulus is 2^VSID_BITS - 1, so the reduction can be done by folding
 * the high bits of the product onto the low bits (add high and low,
 * then fold the possible carry once more).
 *
 *	rt = register containing the proto-VSID and into which the
 *	     VSID will be stored
 *	rx = scratch register (clobbered)
 *
 *	- rt and rx must be different registers
 *	- the answer ends up in the low VSID_BITS bits of rt; the higher
 *	  bits may contain garbage, so the caller may need to mask the
 *	  result
 *
 * NOTE(review): the middle of this macro had been garbled (the
 * backslash continuation chain was broken by stripped comment lines);
 * it is rejoined here into one continuous definition.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* add high and low bits */	\
	/*								\
	 * The sum above may still equal the modulus; adding the	\
	 * carry-out of rt+1 maps that case back into range without	\
	 * disturbing any already-reduced result.			\
	 */								\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* extract 2^VSID_BITS bit */	\
	add	rt,rt,rx
477
478
479#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
480
481#ifndef __ASSEMBLY__
482
483#ifdef CONFIG_PPC_SUBPAGE_PROT
484
485
486
487
488
489
490
491
492
493
494
/*
 * For the sub-page protection option, we extend the mm context with one
 * of these, holding per-4k-subpage protection bits in a sparse tree.
 * NOTE(review): exact level layout follows the SBP_* defines below —
 * confirm against the subpage_prot implementation.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];	/* upper levels */
	unsigned int *low_prot[4];	/* first-level pages for low addresses */
};
500
501#define SBP_L1_BITS (PAGE_SHIFT - 2)
502#define SBP_L2_BITS (PAGE_SHIFT - 3)
503#define SBP_L1_COUNT (1 << SBP_L1_BITS)
504#define SBP_L2_COUNT (1 << SBP_L2_BITS)
505#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
506#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
507
508extern void subpage_prot_free(struct mm_struct *mm);
509extern void subpage_prot_init_new_context(struct mm_struct *mm);
510#else
511static inline void subpage_prot_free(struct mm_struct *mm) {}
512static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
513#endif
514
515typedef unsigned long mm_context_id_t;
516struct spinlock;
517
/* Per-mm MMU context for the 64-bit hash MMU. */
typedef struct {
	mm_context_id_t id;	/* context number, combined with the ESID
				 * to form the proto-VSID (see get_vsid()) */
	u16 user_psize;		/* page size index */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* page-size encodings for the low slices */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB page size encoding */
#endif
	unsigned long vdso_base;
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* sub-page protection tree */
#endif
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* guard acop and cop_pid */
	unsigned long acop;		/* mask of enabled coprocessor types */
	unsigned int cop_pid;		/* pid value used with coprocessors */
#endif
#ifdef CONFIG_PPC_64K_PAGES
	/* for 4K PTE fragment support */
	void *pte_frag;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif
} mm_context_t;
545
546
#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called with.
 * However gcc is not clever enough to compute the modulus
 * (2^n - 1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else
/*
 * Multiply, then reduce mod 2^VSID_BITS - 1 by folding the high bits
 * onto the low bits and folding the possible carry once more (the C
 * twin of ASM_VSID_SCRAMBLE above).
 */
#define vsid_scramble(protovsid, size) \
	({ \
		unsigned long x; \
		x = (protovsid) * VSID_MULTIPLIER_##size; \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif
566
567
568static inline int user_segment_size(unsigned long addr)
569{
570
571 if (addr >= (1UL << SID_SHIFT_1T))
572 return mmu_highuser_ssize;
573 return MMU_SEGSIZE_256M;
574}
575
/*
 * Build the VSID for (@context, @ea): form the proto-VSID as
 * (context << ESID_BITS[_1T]) | esid, then scramble it with
 * vsid_scramble() for the given segment size.
 */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/*
	 * Bad address: outside the page-table-covered range for its
	 * region.  We return VSID 0 for that.
	 */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
591
592
593
594
595
596
597
598
599
600
/*
 * Return the kernel VSID for @ea.  Only meaningful for kernel-region
 * addresses (top nibble 0xc..0xf).
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/*
	 * The kernel takes the contexts just above MAX_USER_CONTEXT:
	 * region 0xc maps to MAX_USER_CONTEXT + 1, ..., region 0xf to
	 * MAX_USER_CONTEXT + 4.
	 */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
611
612unsigned htab_shift_for_mem_size(unsigned long mem_size);
613
614#endif
615
616#endif
617