1#ifndef _ASM_POWERPC_MMU_HASH64_H_
2#define _ASM_POWERPC_MMU_HASH64_H_
3
4
5
6
7
8
9
10
11
12
13
14
15#include <asm/asm-compat.h>
16#include <asm/page.h>
17
18
19
20
21
/*
 * Segment table entry (STE) bits, for CPUs using a software-loaded
 * segment table rather than the SLB.
 */
#define STE_ESID_V 0x80		/* entry is valid */
#define STE_ESID_KS 0x20	/* supervisor (kernel) storage key */
#define STE_ESID_KP 0x10	/* problem-state (user) storage key */
#define STE_ESID_N 0x08		/* no-execute segment */

#define STE_VSID_SHIFT 12	/* VSID position within the STE VSID word */

/* Location of the initial (boot-time) segment table page */
#define STAB0_PAGE 0x6
#define STAB0_OFFSET (STAB0_PAGE << 12)
#define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START)

#ifndef __ASSEMBLY__
/* The initial segment table page; set up by early assembly boot code */
extern char initial_stab[];
#endif
37
38
39
40
41
/*
 * SLB (Segment Lookaside Buffer) definitions
 */
#define SLB_NUM_BOLTED 3	/* SLB entries kept permanently bolted */
#define SLB_CACHE_ENTRIES 8	/* entries tracked in the per-thread SLB cache */
#define SLB_MIN_SIZE 32		/* smallest SLB size we will accept */

/* Bits in the SLB ESID word */
#define SLB_ESID_V ASM_CONST(0x0000000008000000)	/* entry is valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT 12	/* VSID shift for 256M segments */
#define SLB_VSID_SHIFT_1T 24	/* VSID shift for 1T segments */
#define SLB_VSID_SSIZE_SHIFT 62	/* segment size field position */
#define SLB_VSID_B ASM_CONST(0xc000000000000000)	/* segment size field */
#define SLB_VSID_B_256M ASM_CONST(0x0000000000000000)	/* 256MB segment */
#define SLB_VSID_B_1T ASM_CONST(0x4000000000000000)	/* 1T segment */
#define SLB_VSID_KS ASM_CONST(0x0000000000000800)	/* supervisor key */
#define SLB_VSID_KP ASM_CONST(0x0000000000000400)	/* problem-state key */
#define SLB_VSID_N ASM_CONST(0x0000000000000200)	/* no-execute */
#define SLB_VSID_L ASM_CONST(0x0000000000000100)	/* large page */
#define SLB_VSID_C ASM_CONST(0x0000000000000080)	/* class bit */
#define SLB_VSID_LP ASM_CONST(0x0000000000000030)	/* large-page selector */
#define SLB_VSID_LP_00 ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01 ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10 ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11 ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP (SLB_VSID_L|SLB_VSID_LP)	/* combined L||LP field */

/* Canonical VSID flag sets for kernel and user segments */
#define SLB_VSID_KERNEL (SLB_VSID_KP)
#define SLB_VSID_USER (SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

/* Fields of the RB operand to the slbie instruction */
#define SLBIE_C (0x08000000)	/* class bit */
#define SLBIE_SSIZE_SHIFT 25	/* segment size field position */
73
74
75
76
77
/*
 * Hashed page table (HPTE) definitions
 */
#define HPTES_PER_GROUP 8	/* HPTEs per primary/secondary group (PTEG) */

/* Bits in the first ("v") doubleword of an HPTE */
#define HPTE_V_SSIZE_SHIFT 62	/* segment size field position */
#define HPTE_V_AVPN_SHIFT 7	/* abbreviated virtual page number position */
#define HPTE_V_AVPN ASM_CONST(0x3fffffffffffff80)	/* AVPN field mask */
#define HPTE_V_AVPN_VAL(x) (((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
/* Match everything above bit 7 (AVPN + segment size) in one compare */
#define HPTE_V_COMPARE(x,y) (!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED ASM_CONST(0x0000000000000010)	/* sw: don't evict */
#define HPTE_V_LOCK ASM_CONST(0x0000000000000008)	/* sw: entry locked */
#define HPTE_V_LARGE ASM_CONST(0x0000000000000004)	/* large page */
#define HPTE_V_SECONDARY ASM_CONST(0x0000000000000002)	/* in secondary PTEG */
#define HPTE_V_VALID ASM_CONST(0x0000000000000001)	/* entry is valid */

/* Bits in the second ("r") doubleword of an HPTE */
#define HPTE_R_PP0 ASM_CONST(0x8000000000000000)	/* page protection bit 0 */
#define HPTE_R_TS ASM_CONST(0x4000000000000000)
#define HPTE_R_RPN_SHIFT 12	/* real page number position */
#define HPTE_R_RPN ASM_CONST(0x3ffffffffffff000)	/* real page number mask */
#define HPTE_R_FLAGS ASM_CONST(0x00000000000003ff)	/* low flag bits */
#define HPTE_R_PP ASM_CONST(0x0000000000000003)		/* page protection */
#define HPTE_R_N ASM_CONST(0x0000000000000004)		/* no-execute */
#define HPTE_R_C ASM_CONST(0x0000000000000080)		/* changed (dirty) */
#define HPTE_R_R ASM_CONST(0x0000000000000100)		/* referenced */

/* Bits used for VRMA (virtual real-mode area) HPTEs */
#define HPTE_V_1TB_SEG ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)

/*
 * Page protection (PP) value names: RW/RX access for
 * supervisor (first pair) and user (second pair).
 */
#define PP_RWXX 0	/* supervisor read/write, user none */
#define PP_RWRX 1	/* supervisor read/write, user read */
#define PP_RWRW 2	/* supervisor read/write, user read/write */
#define PP_RXRX 3	/* supervisor read, user read */
110
111#ifndef __ASSEMBLY__
112
/*
 * One hardware hash page table entry: two doublewords matching the
 * HPTE layout (v = valid/AVPN word, r = RPN/protection word).
 */
struct hash_pte {
	unsigned long v;	/* HPTE_V_* bits */
	unsigned long r;	/* HPTE_R_* bits */
};

/* The kernel's hash page table, established at boot */
extern struct hash_pte *htab_address;	/* base address of the hash table */
extern unsigned long htab_size_bytes;	/* total size of the table in bytes */
extern unsigned long htab_hash_mask;	/* presumably masks hpt_hash() output to the table size — confirm at use sites */
121
122
123
124
125
126
127
128
129
130
/*
 * Per-page-size MMU parameters; one entry per MMU_PAGE_* size,
 * filled in from firmware/platform information.
 */
struct mmu_psize_def
{
	unsigned int shift;	/* log2 of the page size */
	unsigned int penc;	/* HPTE encoding of this page size */
	unsigned int tlbiel;	/* nonzero if tlbiel works for this size */
	unsigned long avpnm;	/* bits masked out of the AVPN for this size */
	unsigned long sllp;	/* SLB L||LP bits to use for this size */
};
139
140#endif
141
142
143
144
145
146
147
/* Segment size encodings used in SLB and HPTE segment-size fields */
#define MMU_SEGSIZE_256M 0	/* 256MB segments */
#define MMU_SEGSIZE_1T 1	/* 1TB segments */
150
151
152#ifndef __ASSEMBLY__
153
154
155
156
/* Table of per-page-size MMU parameters, indexed by MMU_PAGE_* */
extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
/* Page sizes chosen for the various classes of kernel/user mappings */
extern int mmu_linear_psize;	/* linear (kernel direct) mapping */
extern int mmu_virtual_psize;	/* base user page size */
extern int mmu_vmalloc_psize;	/* vmalloc region */
extern int mmu_vmemmap_psize;	/* virtual memmap region */
extern int mmu_io_psize;	/* ioremap region */
/* Segment sizes chosen for kernel and high user addresses */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;	/* number of SLB entries on this CPU */
extern unsigned long tce_alloc_start, tce_alloc_end;

/* Nonzero if cache-inhibited mappings restrict the usable page sizes */
extern int mmu_ci_restrictions;

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Huge page sizes supported on this machine: nonzero entries in this
 * array (indexed by MMU_PAGE_*) mark usable huge page sizes.
 */
extern unsigned int mmu_huge_psizes[MMU_PAGE_COUNT];

#endif
183
184
185
186
187
188static inline unsigned long hpte_encode_v(unsigned long va, int psize,
189 int ssize)
190{
191 unsigned long v;
192 v = (va >> 23) & ~(mmu_psize_defs[psize].avpnm);
193 v <<= HPTE_V_AVPN_SHIFT;
194 if (psize != MMU_PAGE_4K)
195 v |= HPTE_V_LARGE;
196 v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
197 return v;
198}
199
200
201
202
203
204
205static inline unsigned long hpte_encode_r(unsigned long pa, int psize)
206{
207 unsigned long r;
208
209
210 if (psize == MMU_PAGE_4K)
211 return pa & HPTE_R_RPN;
212 else {
213 unsigned int penc = mmu_psize_defs[psize].penc;
214 unsigned int shift = mmu_psize_defs[psize].shift;
215 return (pa & ~((1ul << shift) - 1)) | (penc << 12);
216 }
217 return r;
218}
219
220
221
222
223static inline unsigned long hpt_va(unsigned long ea, unsigned long vsid,
224 int ssize)
225{
226 if (ssize == MMU_SEGSIZE_256M)
227 return (vsid << 28) | (ea & 0xfffffffUL);
228 return (vsid << 40) | (ea & 0xffffffffffUL);
229}
230
231
232
233
234
235static inline unsigned long hpt_hash(unsigned long va, unsigned int shift,
236 int ssize)
237{
238 unsigned long hash, vsid;
239
240 if (ssize == MMU_SEGSIZE_256M) {
241 hash = (va >> 28) ^ ((va & 0x0fffffffUL) >> shift);
242 } else {
243 vsid = va >> 40;
244 hash = vsid ^ (vsid << 25) ^ ((va & 0xffffffffffUL) >> shift);
245 }
246 return hash & 0x7fffffffffUL;
247}
248
/* Low-level hash-fault handlers for the individual page sizes */
extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned int local, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned int local, int ssize);
struct mm_struct;
/* Generic hash-fault entry point; dispatches on page size */
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap);
extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
			  unsigned long ea, unsigned long vsid, int local,
			  unsigned long trap);

/* Insert bolted kernel translations for [vstart, vend) -> pstart */
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
/* Register boot-time allocated gigantic pages */
extern void add_gpage(unsigned long addr, unsigned long page_size,
		      unsigned long number_of_pages);
/* Demote a segment to base 4K pages (e.g. for subpage protection) */
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

/* Platform-specific hash table backend initializers */
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
extern void hpte_init_beat(void);
extern void hpte_init_beat_v3(void);

/* Segment table / SLB management */
extern void stabs_alloc(void);
extern void slb_initialize(void);
extern void slb_flush_and_rebolt(void);
extern void stab_initialize(unsigned long stab);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
281#endif
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
/*
 * VSID scramble constants.  The proto-VSID is multiplied by the
 * (prime) multiplier and reduced modulo 2^VSID_BITS - 1 (a Mersenne
 * modulus, which lets vsid_scramble() fold instead of divide).
 */
#define VSID_MULTIPLIER_256M ASM_CONST(200730139)	/* prime */
#define VSID_BITS_256M 36
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)

#define VSID_MULTIPLIER_1T ASM_CONST(12538073)	/* prime */
#define VSID_BITS_1T 24
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)

/* How the proto-VSID is composed: context bits plus user ESID bits */
#define CONTEXT_BITS 19
#define USER_ESID_BITS 16
#define USER_ESID_BITS_1T 4

/* Span of user addresses covered by one context */
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT))
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
/*
 * Assembly version of the VSID scramble:
 * computes (rt * VSID_MULTIPLIER) % VSID_MODULUS for the given
 * segment size suffix (256M or 1T).
 *
 *	rt = register holding the proto-VSID; receives the VSID
 *	rx = scratch register (clobbered); must differ from rt
 *
 * Only the low VSID_BITS of rt are meaningful afterwards; callers may
 * need to mask the result.
 *
 * Fix: the comment block that originally sat between the two halves of
 * this macro was stripped, breaking the backslash continuation chain —
 * the macro ended at the blank line after "add rt,rt,rx" and the final
 * three instructions were left as stray file-scope tokens.  The
 * continuation is restored here.
 */
#define ASM_VSID_SCRAMBLE(rt, rx, size) \
	lis	rx,VSID_MULTIPLIER_##size@h; \
	ori	rx,rx,VSID_MULTIPLIER_##size@l; \
	mulld	rt,rt,rx;		/* rt = rt * MULTIPLIER */ \
 \
	srdi	rx,rt,VSID_BITS_##size; \
	clrldi	rt,rt,(64-VSID_BITS_##size); \
	add	rt,rt,rx;		/* fold high bits into low bits */ \
	/* The sum may still be >= MODULUS; if (rt+1) overflows	   \
	 * VSID_BITS the carry is exactly the extra 1 we must add  \
	 * to reduce modulo 2^VSID_BITS - 1.			   */ \
	addi	rx,rt,1; \
	srdi	rx,rx,VSID_BITS_##size;	/* 1 iff rt >= MODULUS */ \
	add	rt,rt,rx
379
380
381#ifndef __ASSEMBLY__
382
/* Context id allocated to an address space; feeds the VSID scramble */
typedef unsigned long mm_context_id_t;

/* Per-mm MMU state for the hash MMU */
typedef struct {
	mm_context_id_t id;	/* context number for this address space */
	u16 user_psize;		/* base page size index for user pages */

#ifdef CONFIG_PPC_MM_SLICES
	/* Per-slice page size encodings for the low and high slices */
	u64 low_slices_psize;
	u64 high_slices_psize;
#else
	u16 sllp;		/* SLB L||LP encoding for user_psize */
#endif
	unsigned long vdso_base;	/* user address of the vDSO mapping */
} mm_context_t;
397
398
/*
 * C version of the VSID scramble: (protovsid * MULTIPLIER) % MODULUS.
 * Because MODULUS is 2^VSID_BITS - 1, the modulus can be computed by
 * folding the bits above VSID_BITS back into the low bits, avoiding a
 * 64-bit division.
 */
#if 0
/*
 * Straightforward (but slower) reference version using an actual
 * modulus operation.  (The "scrample" name keeps it from clashing
 * with the real definition below.)
 */
#define vsid_scrample(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

#else
#define vsid_scramble(protovsid, size) \
	({ \
		unsigned long x; \
		x = (protovsid) * VSID_MULTIPLIER_##size; \
		/* fold once, then add the carry of (x+1) to finish the */ \
		/* reduction modulo 2^VSID_BITS - 1 */ \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})
#endif
418
419
420static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
421{
422 if (ssize == MMU_SEGSIZE_256M)
423 return vsid_scramble(ea >> SID_SHIFT, 256M);
424 return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
425}
426
427
428static inline int user_segment_size(unsigned long addr)
429{
430
431 if (addr >= (1UL << SID_SHIFT_1T))
432 return mmu_highuser_ssize;
433 return MMU_SEGSIZE_256M;
434}
435
436
437static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
438 int ssize)
439{
440 if (ssize == MMU_SEGSIZE_256M)
441 return vsid_scramble((context << USER_ESID_BITS)
442 | (ea >> SID_SHIFT), 256M);
443 return vsid_scramble((context << USER_ESID_BITS_1T)
444 | (ea >> SID_SHIFT_1T), 1T);
445}
446
447
448
449
450
/*
 * Reference form of the 256M VSID scramble using a real modulus;
 * only valid for kernel (256M-segment) addresses.
 */
#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
			      VSID_MODULUS_256M)
#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
454
455#endif
456
457#endif
458