1#ifndef _ASM_POWERPC_MMU_HASH64_H_
2#define _ASM_POWERPC_MMU_HASH64_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17
18
19
20
21
22
23#include <asm/pgtable-ppc64.h>
24#include <asm/bug.h>
25#include <asm/processor.h>
26
27
28
29
30
/* SLB (Segment Lookaside Buffer) management */
#define SLB_NUM_BOLTED 3	/* SLB entries that are never cast out */
#define SLB_CACHE_ENTRIES 8
#define SLB_MIN_SIZE 32

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000)	/* entry is valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12
#define SLB_VSID_SHIFT_1T 24
#define SLB_VSID_SSIZE_SHIFT 62
#define SLB_VSID_B ASM_CONST(0xc000000000000000)	/* segment-size field */
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)
#define SLB_VSID_N ASM_CONST(0x0000000000000200)	/* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)	/* large page */
#define SLB_VSID_C ASM_CONST(0x0000000000000080)	/* class */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)	/* large-page selector */
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)

/* Kp=1 everywhere; user segments additionally set Ks and the class bit */
#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C (0x08000000)	/* class bit in the slbie RB operand */
#define SLBIE_SSIZE_SHIFT 25
62
63
64
65
66
/* Hashed page table: 8 HPTEs per hash bucket (PTE group) */
#define HPTES_PER_GROUP 8

/* HPTE first doubleword (struct hash_pte.v) */
#define HPTE_V_SSIZE_SHIFT 62		/* segment-size field */
#define HPTE_V_AVPN_SHIFT 7		/* abbreviated VPN field */
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
/* match on everything except the low 7 (software/flag) bits */
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)	/* software: bolted */
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)	/* software: lock bit */
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)	/* page larger than 4K */
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)	/* in secondary PTEG */
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)

/* HPTE second doubleword (struct hash_pte.r) */
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)	/* page-protection bit 0 */
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI ASM_CONST(0x3000000000000000)
#define HPTE_R_RPN_SHIFT 12
#define HPTE_R_RPN ASM_CONST(0x0ffffffffffff000)	/* real page number */
#define HPTE_R_PP ASM_CONST(0x0000000000000003)	/* page-protection bits 1-2 */
#define HPTE_R_N ASM_CONST(0x0000000000000004)	/* no-execute */
/* W/I/M/G storage-attribute bits and their combined mask */
#define HPTE_R_G ASM_CONST(0x0000000000000008)
#define HPTE_R_M ASM_CONST(0x0000000000000010)
#define HPTE_R_I ASM_CONST(0x0000000000000020)
#define HPTE_R_W ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG ASM_CONST(0x0000000000000078)
#define HPTE_R_C ASM_CONST(0x0000000000000080)	/* changed (dirty) */
#define HPTE_R_R ASM_CONST(0x0000000000000100)	/* referenced */
#define HPTE_R_KEY_LO ASM_CONST(0x0000000000000e00)

/* 1TB-segment and VRMA markers in the first doubleword */
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
98
99
/* Values for PP (page protection) -- assumes Ks=0, Kp=1 */
#define PP_RWXX 0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX (HPTE_R_PP0 | 2)	/* Supervisor read, User none */

/* Fields for tlbiel instruction's RB operand */
#define TLBIEL_INVAL_SEL_MASK 0xc00	/* invalidation selector */
#define TLBIEL_INVAL_PAGE 0x000		/* invalidate a single page */
#define TLBIEL_INVAL_SET_LPID 0x800	/* invalidate a set for current LPID */
#define TLBIEL_INVAL_SET 0xc00		/* invalidate one congruence-class set */
#define TLBIEL_INVAL_SET_MASK 0xfff000	/* set number field */
#define TLBIEL_INVAL_SET_SHIFT 12	/* position of set number in RB */

/* TLB congruence-class (set) counts -- used for set-by-set tlbiel flushes */
#define POWER7_TLB_SETS 128
#define POWER8_TLB_SETS 512
116
117#ifndef __ASSEMBLY__
118
/* One hashed-page-table entry: two big-endian doublewords. */
struct hash_pte {
	__be64 v;	/* AVPN, segment size, flags (see HPTE_V_*) */
	__be64 r;	/* real page number, WIMG, protection (see HPTE_R_*) */
};

extern struct hash_pte *htab_address;	/* base of the hash page table */
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;
127
128
129
130
131
132
133
134
135
136
/* Per-page-size MMU parameters, indexed by MMU_PAGE_* in mmu_psize_defs[]. */
struct mmu_psize_def
{
	unsigned int shift;	/* log2 of the page size (0 if unsupported) */
	int penc[MMU_PAGE_COUNT];	/* HPTE LP encoding, per actual page size
					 * within this base size (see hpte_encode_r) */
	unsigned int tlbiel;	/* tlbiel capability flag for this size -- TODO confirm semantics */
	unsigned long avpnm;	/* AVPN bits to mask off (see hpte_encode_avpn) */
	unsigned long sllp;	/* SLB L/LP encoding for this size -- presumably; verify */
};
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
146
147static inline int shift_to_mmu_psize(unsigned int shift)
148{
149 int psize;
150
151 for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
152 if (mmu_psize_defs[psize].shift == shift)
153 return psize;
154 return -1;
155}
156
/*
 * Translate an MMU page-size index to its page shift.
 *
 * NOTE(review): if the entry's shift is 0 (size not configured) this
 * BUG()s; it relies on BUG() not returning, otherwise control falls
 * off the end of a non-void function -- confirm BUG() is noreturn here.
 */
static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}
163
164#endif
165
166
167
168
169
170
171
/* Segment sizes (values of the B field in SLB VSID / HPTE) */
#define MMU_SEGSIZE_256M 0
#define MMU_SEGSIZE_1T 1

/*
 * "vpn" throughout this file is the virtual address shifted right by
 * VPN_SHIFT, i.e. the virtual page number in 4K (1 << 12) units.
 */
#define VPN_SHIFT 12

/* LP field in the HPTE second doubleword (large-page size encoding) */
#define LP_SHIFT 12
#define LP_BITS 8
#define LP_MASK(i) ((0xFF >> (i)) << LP_SHIFT)
191
192#ifndef __ASSEMBLY__
193
194static inline int slb_vsid_shift(int ssize)
195{
196 if (ssize == MMU_SEGSIZE_256M)
197 return SLB_VSID_SHIFT;
198 return SLB_VSID_SHIFT_1T;
199}
200
201static inline int segment_shift(int ssize)
202{
203 if (ssize == MMU_SEGSIZE_256M)
204 return SID_SHIFT;
205 return SID_SHIFT_1T;
206}
207
208
209
210
211extern int mmu_linear_psize;
212extern int mmu_virtual_psize;
213extern int mmu_vmalloc_psize;
214extern int mmu_vmemmap_psize;
215extern int mmu_io_psize;
216extern int mmu_kernel_ssize;
217extern int mmu_highuser_ssize;
218extern u16 mmu_slb_size;
219extern unsigned long tce_alloc_start, tce_alloc_end;
220
221
222
223
224
225
226
227extern int mmu_ci_restrictions;
228
229
230
231
232
233
/*
 * Build the AVPN (abbreviated virtual page number) portion of the HPTE
 * first doubleword, including the segment-size field.
 *
 * vpn is already in VPN_SHIFT (4K) units, so shifting right by
 * (23 - VPN_SHIFT) discards everything below VA bit 23 -- the AVPN
 * only holds the address bits above that point.  avpnm then clears
 * the low AVPN bits that are not architecturally compared for this
 * page size.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;

	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	/* segment size (256M vs 1T) goes in the top two bits */
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}
251
252
253
254
255
256static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
257 int actual_psize, int ssize)
258{
259 unsigned long v;
260 v = hpte_encode_avpn(vpn, base_psize, ssize);
261 if (actual_psize != MMU_PAGE_4K)
262 v |= HPTE_V_LARGE;
263 return v;
264}
265
266
267
268
269
270
271static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
272 int actual_psize)
273{
274
275 if (actual_psize == MMU_PAGE_4K)
276 return pa & HPTE_R_RPN;
277 else {
278 unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
279 unsigned int shift = mmu_psize_defs[actual_psize].shift;
280 return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
281 }
282}
283
284
285
286
287static inline unsigned long hpt_vpn(unsigned long ea,
288 unsigned long vsid, int ssize)
289{
290 unsigned long mask;
291 int s_shift = segment_shift(ssize);
292
293 mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
294 return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
295}
296
297
298
299
/*
 * Primary bucket hash for a hash-page-table lookup.
 *
 * @vpn:   virtual page number (VA >> VPN_SHIFT)
 * @shift: page-size shift of the mapping being hashed
 * @ssize: MMU_SEGSIZE_256M or MMU_SEGSIZE_1T
 *
 * hash = vsid XOR (page index within the segment); for 1T segments
 * the vsid is additionally folded with itself shifted left by 25.
 * The result is truncated to the low 39 bits (0x7fffffffff).
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	int mask;	/* fits in int: at most SID_SHIFT_1T - VPN_SHIFT bits */
	unsigned long hash, vsid;

	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT)) ;
	}
	return hash & 0x7fffffffffUL;
}
319
320#define HPTE_LOCAL_UPDATE 0x1
321#define HPTE_NOHPTE_UPDATE 0x2
322
323extern int __hash_page_4K(unsigned long ea, unsigned long access,
324 unsigned long vsid, pte_t *ptep, unsigned long trap,
325 unsigned long flags, int ssize, int subpage_prot);
326extern int __hash_page_64K(unsigned long ea, unsigned long access,
327 unsigned long vsid, pte_t *ptep, unsigned long trap,
328 unsigned long flags, int ssize);
329struct mm_struct;
330unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
331extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
332 unsigned long access, unsigned long trap,
333 unsigned long flags);
334extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
335 unsigned long dsisr);
336int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
337 pte_t *ptep, unsigned long trap, unsigned long flags,
338 int ssize, unsigned int shift, unsigned int mmu_psize);
339#ifdef CONFIG_TRANSPARENT_HUGEPAGE
340extern int __hash_page_thp(unsigned long ea, unsigned long access,
341 unsigned long vsid, pmd_t *pmdp, unsigned long trap,
342 unsigned long flags, int ssize, unsigned int psize);
343#else
/*
 * Stub used when CONFIG_TRANSPARENT_HUGEPAGE is off: a THP hash
 * fault cannot legitimately occur, so reaching this is a bug.
 */
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;	/* unreachable; keeps the compiler happy */
}
352#endif
353extern void hash_failure_debug(unsigned long ea, unsigned long access,
354 unsigned long vsid, unsigned long trap,
355 int ssize, int psize, int lpsize,
356 unsigned long pte);
357extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
358 unsigned long pstart, unsigned long prot,
359 int psize, int ssize);
360int htab_remove_mapping(unsigned long vstart, unsigned long vend,
361 int psize, int ssize);
362extern void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
363extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
364
365extern void hpte_init_native(void);
366extern void hpte_init_lpar(void);
367extern void hpte_init_beat(void);
368extern void hpte_init_beat_v3(void);
369
370extern void slb_initialize(void);
371extern void slb_flush_and_rebolt(void);
372
373extern void slb_vmalloc_update(void);
374extern void slb_set_size(u16 size);
375#endif
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
/*
 * Proto-VSID layout: a proto-VSID is the context number (CONTEXT_BITS)
 * concatenated with the ESID bits of the address; it is then scrambled
 * into the final VSID (see vsid_scramble / ASM_VSID_SCRAMBLE).
 */
#define CONTEXT_BITS 19
#define ESID_BITS 18
#define ESID_BITS_1T 6

/*
 * The top context ids are reserved: get_kernel_vsid() maps the kernel
 * regions 0xc..0xf to MAX_USER_CONTEXT+1 .. MAX_USER_CONTEXT+4, which
 * is why user contexts stop 5 short of 2^CONTEXT_BITS.
 */
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)

/* Scramble parameters: multiplier and modulus (2^VSID_BITS - 1) */
#define VSID_MULTIPLIER_256M ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

/* Span of user address space covered by a single context */
#define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
/*
 * Assembler version of vsid_scramble(): computes
 * (rt * VSID_MULTIPLIER) mod (2^VSID_BITS - 1) in rt, clobbering rx.
 *
 * Fix: the macro's backslash line-continuation was broken (blank
 * lines without a trailing '\' in the middle of the body, leaving an
 * orphan continuation before "addi"), which spliced assembler text to
 * file scope.  Restored a continuous macro body.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size)					\
	lis	rx,VSID_MULTIPLIER_##size@h;				\
	ori	rx,rx,VSID_MULTIPLIER_##size@l;				\
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */	\
									\
	srdi	rx,rt,VSID_BITS_##size;					\
	clrldi	rt,rt,(64-VSID_BITS_##size);				\
	add	rt,rt,rx;		/* fold high bits back in */	\
	/*								\
	 * The fold above can still leave rt == 2^VSID_BITS - 1	\
	 * (== the modulus); the sequence below adds 1 iff that	\
	 * carry occurs, reducing the result mod 2^VSID_BITS - 1.	\
	 */								\
	addi	rx,rt,1;						\
	srdi	rx,rx,VSID_BITS_##size;	/* rx = 1 iff rt == modulus */	\
	add	rt,rt,rx
475
476
477#define SLICE_ARRAY_SIZE (PGTABLE_RANGE >> 41)
478
479#ifndef __ASSEMBLY__
480
481#ifdef CONFIG_PPC_SUBPAGE_PROT
482
483
484
485
486
487
488
489
490
491
492
/*
 * Per-mm subpage protection state (CONFIG_PPC_SUBPAGE_PROT): extra
 * per-4k-subpage protection bits.  protptrs[] covers the address
 * space in (1 << 43)-byte chunks; low_prot[] holds the first few
 * low-address tables directly -- layout details per SBP_* below.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* highest address with protections set */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

/* Geometry of the two-level protection-bit tables */
#define SBP_L1_BITS (PAGE_SHIFT - 2)
#define SBP_L2_BITS (PAGE_SHIFT - 3)
#define SBP_L1_COUNT (1 << SBP_L1_BITS)
#define SBP_L2_COUNT (1 << SBP_L2_BITS)
#define SBP_L2_SHIFT (PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT (SBP_L2_SHIFT + SBP_L2_BITS)
505
506extern void subpage_prot_free(struct mm_struct *mm);
507extern void subpage_prot_init_new_context(struct mm_struct *mm);
508#else
509static inline void subpage_prot_free(struct mm_struct *mm) {}
510static inline void subpage_prot_init_new_context(struct mm_struct *mm) { }
511#endif
512
typedef unsigned long mm_context_id_t;
struct spinlock;

/* Per-address-space MMU state for the 64-bit hash MMU. */
typedef struct {
	mm_context_id_t id;	/* context number -- presumably fed to get_vsid(); verify */
	u16 user_psize;		/* base page-size index for user mappings */

#ifdef CONFIG_PPC_MM_SLICES
	u64 low_slices_psize;	/* per-slice psize, low addresses -- TODO confirm layout */
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
#else
	u16 sllp;		/* SLB L/LP encoding when slices are off -- presumably */
#endif
	unsigned long vdso_base;	/* user address of the vDSO mapping */
#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table spt;	/* per-4k subpage protections */
#endif
#ifdef CONFIG_PPC_ICSWX
	struct spinlock *cop_lockp;	/* NOTE(review): presumably guards acop/cop_pid */
	unsigned long acop;
	unsigned int cop_pid;
#endif
#ifdef CONFIG_PPC_64K_PAGES
	/* partially consumed page used for page-table fragments */
	void *pte_frag;
#endif
#ifdef CONFIG_SPAPR_TCE_IOMMU
	struct list_head iommu_group_mem_list;
#endif
} mm_context_t;
543
544
/*
 * vsid_scramble(): map a proto-VSID (context | esid) to the final VSID.
 * Both variants compute (protovsid * MULTIPLIER) mod (2^VSID_BITS - 1).
 */
#if 0
/* Reference implementation: direct modulo (uses a slow 64-bit divide). */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else
/* Division-free version: fold the product modulo 2^VSID_BITS - 1. */
#define vsid_scramble(protovsid, size) \
	({ \
		unsigned long x; \
		x = (protovsid) * VSID_MULTIPLIER_##size; \
		/* fold high bits back in: x mod 2^n - 1 */ \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		/* correct the x == modulus case back to 0 */ \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif
564
565
566static inline int user_segment_size(unsigned long addr)
567{
568
569 if (addr >= (1UL << SID_SHIFT_1T))
570 return mmu_highuser_ssize;
571 return MMU_SEGSIZE_256M;
572}
573
/*
 * Build the VSID for an effective address in the given context.
 * Returns 0 for an address beyond the page-table-covered range.
 */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	/* addresses above PGTABLE_RANGE within the region are invalid */
	if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
		return 0;

	/* proto-VSID = context concatenated with the ESID, then scrambled */
	if (ssize == MMU_SEGSIZE_256M)
		return vsid_scramble((context << ESID_BITS)
				     | (ea >> SID_SHIFT), 256M);
	return vsid_scramble((context << ESID_BITS_1T)
			     | (ea >> SID_SHIFT_1T), 1T);
}
589
590
591
592
593
594
595
596
597
598
/*
 * VSID for a kernel effective address.
 *
 * The kernel regions 0xc..0xf (top 4 bits of ea) are assigned the
 * four context ids immediately above the user range, i.e.
 * MAX_USER_CONTEXT + 1 .. MAX_USER_CONTEXT + 4.
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	/* region index (ea >> 60, 0xc..0xf) -> context above user range */
	context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
	return get_vsid(context, ea, ssize);
}
609#endif
610
611#endif
612