/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>
enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map  (0xc000000000000000) */
	VMALLOC_INDEX	= 1, /* Kernel virtual map (0xd000000000000000) */
	KSTACK_INDEX	= 2, /* Kernel stack map */
};

extern void slb_allocate(unsigned long ea);
#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

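/*
 * Build the two doublewords of an SLB entry for a kernel address:
 * mk_esid_data() packs the effective segment ID, the valid bit and the
 * SLB slot index; mk_vsid_data() packs the kernel VSID, the segment size
 * and the protection/page-size (LLP) flags.
 */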
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

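/*
 * The bolted SLB entries are mirrored in the SLB shadow buffer so they can
 * be re-installed without reading the SLB itself, e.g. by the realmode
 * restore path below or by the hypervisor on platforms that register a
 * shadow buffer.
 */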
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

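/*
 * A cleared entry keeps its slot index in the (now invalid) ESID word, so
 * that replaying the shadow with slbmte writes the invalid entry back into
 * the same slot rather than into slot 0.
 */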
static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, cpu_to_be64(index));
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

/*
 * Insert the bolted entries into an empty SLB.
 * This is not the same as rebolt because the bolted segments are not
 * changed, just loaded from the shadow area.
 */
void __slb_restore_bolted_realmode(void)
{
	struct slb_shadow *p = get_slb_shadow();
	enum slb_index index;

	/* No isync needed because realmode. */
	for (index = 0; index < SLB_NUM_BOLTED; index++) {
		asm volatile("slbmte  %0,%1" :
		     : "r" (be64_to_cpu(p->save_area[index].vsid)),
		       "r" (be64_to_cpu(p->save_area[index].esid)));
	}
}

/*
 * Insert the bolted entries into an empty SLB and reset the SLB cache.
 */
void slb_restore_bolted_realmode(void)
{
	__slb_restore_bolted_realmode();
	get_paca()->slb_cache_ptr = 0;
}

/*
 * This flushes all SLB entries including 0, so it must be realmode.
 */
void slb_flush_all_realmode(void)
{
	/*
	 * slbmte with zero operands invalidates entry 0, which slbia leaves
	 * untouched; slbia then invalidates the remaining entries.
	 */
	asm volatile("slbmte %0,%0; slbia" : : "r" (0));
}

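/*
 * Flush all non-bolted SLB entries and re-create the bolted vmalloc and
 * kernel stack entries. The kernel linear mapping in entry 0 is preserved
 * because slbia does not touch entry 0.
 */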
static void __slb_flush_and_rebolt(void)
{
	/* If you change this make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
			"r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
			"r"(ksp_vsid_data),
			"r"(ksp_esid_data)
		     : "memory");
}

void slb_flush_and_rebolt(void)
{

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

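/*
 * Capture the hardware SLB contents into @slb_ptr so they can be reported
 * later with slb_dump_contents(), e.g. from error handling paths before
 * the SLB is flushed.
 */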
void slb_save_contents(struct slb_entry *slb_ptr)
{
	int i;
	unsigned long e, v;

	/* Save slb_cache_ptr value. */
	get_paca()->slb_save_cache_ptr = get_paca()->slb_cache_ptr;

	if (!slb_ptr)
		return;

	for (i = 0; i < mmu_slb_size; i++) {
		asm volatile("slbmfee  %0,%1" : "=r" (e) : "r" (i));
		asm volatile("slbmfev  %0,%1" : "=r" (v) : "r" (i));
		slb_ptr->esid = e;
		slb_ptr->vsid = v;
		slb_ptr++;
	}
}

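/*
 * Pretty-print an SLB image previously captured by slb_save_contents(),
 * together with the saved SLB cache state.
 */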
void slb_dump_contents(struct slb_entry *slb_ptr)
{
	int i, n;
	unsigned long e, v;
	unsigned long llp;

	if (!slb_ptr)
		return;

	pr_err("SLB contents of cpu 0x%x\n", smp_processor_id());
	pr_err("Last SLB entry inserted at slot %lld\n", get_paca()->stab_rr);

	for (i = 0; i < mmu_slb_size; i++) {
		e = slb_ptr->esid;
		v = slb_ptr->vsid;
		slb_ptr++;

		if (!e && !v)
			continue;

		pr_err("%02d %016lx %016lx\n", i, e, v);

		if (!(e & SLB_ESID_V)) {
			pr_err("\n");
			continue;
		}
		llp = v & SLB_VSID_LLP;
		if (v & SLB_VSID_B_1T) {
			pr_err("  1T  ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID_1T(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T, llp);
		} else {
			pr_err(" 256M ESID=%9lx  VSID=%13lx LLP:%3lx\n",
			       GET_ESID(e),
			       (v & ~SLB_VSID_B) >> SLB_VSID_SHIFT, llp);
		}
	}
	pr_err("----------------------------------\n");

	/* Dump the SLB cache entries as well. */
	pr_err("SLB cache ptr value = %d\n", get_paca()->slb_save_cache_ptr);
	pr_err("Valid SLB cache entries:\n");
	n = min_t(int, get_paca()->slb_save_cache_ptr, SLB_CACHE_ENTRIES);
	for (i = 0; i < n; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
	pr_err("Rest of SLB cache entries:\n");
	for (i = n; i < SLB_CACHE_ENTRIES; i++)
		pr_err("%02d EA[0-35]=%9x\n", i, get_paca()->slb_cache[i]);
}

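/*
 * Called when the page size used for the vmalloc region changes: refresh
 * the bolted VMALLOC shadow entry and rebolt so the new flags take effect.
 */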
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
	slb_flush_and_rebolt();
}

/*
 * Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
			 ((addr2 >> SID_SHIFT_1T) != 0));

	/* Both addresses are < 1T. */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match. */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss,
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	copy_mm_to_paca(mm);

	/*
	 * Preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{
	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually addi) and cmpldi take a 16-bit immediate
	 * value, and it is situated in the same location in the instruction,
	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
	 * To patch the value we read the existing instruction, clear the
	 * immediate field, or in our new value, then write the instruction
	 * back.
	 */
	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}

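/*
 * These symbols mark instructions in the assembly SLB miss handler whose
 * 16-bit immediates are patched with the encodings computed in
 * slb_initialize() and slb_set_size().
 */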
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

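/*
 * Runtime update of the usable SLB size: record it and patch the
 * round-robin bound check in the SLB miss handler.
 */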
void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

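/*
 * Boot-time SLB setup for this CPU: patch the miss handler encodings
 * (once), reset the round-robin pointer, invalidate the whole SLB and
 * bolt the linear, vmalloc and (on secondary CPUs) kernel stack segments.
 */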
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATs */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);

	/*
	 * For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

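/*
 * Insert a user SLB entry for @ea, choosing the slot with a simple
 * round-robin starting at SLB_NUM_BOLTED, and record it in the PACA SLB
 * cache so that switch_slb() can invalidate it cheaply with slbie.
 */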
static void insert_slb_entry(unsigned long vsid, unsigned long ea,
			     int bpsize, int ssize)
{
	unsigned long flags, vsid_data, esid_data;
	enum slb_index index;
	int slb_cache_index;

	/*
	 * We are irq disabled, hence should be safe to access PACA.
	 */
	VM_WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	index = get_paca()->stab_rr;

	/*
	 * Simple round-robin replacement of slb starting at SLB_NUM_BOLTED.
	 */
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;

	get_paca()->stab_rr = index;

	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
		    ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");

	/*
	 * Now update the slb cache entries.
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in the slb cache for the optimized
		 * switch_slb() path. Store the top 36 bits of esid_data
		 * as per the ISA.
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content no longer
		 * reflects the active SLB contents. Bump the ptr so that
		 * switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

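/*
 * Handle an SLB miss on an address that belongs to an extended context
 * beyond the default context range: derive the VSID from the extra
 * context id and install the entry.
 */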
static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
{
	struct mm_struct *mm = current->mm;
	unsigned long vsid;
	int bpsize;

	/*
	 * We are always above 1TB, hence use the high user segment size.
	 */
	vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
	bpsize = get_slice_psize(mm, ea);
	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
}

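/*
 * SLB miss entry point for large user addresses, called from the exception
 * code; misses below (1UL << MAX_EA_BITS_PER_CONTEXT) are handled entirely
 * in assembly.
 */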
void slb_miss_large_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long ea = regs->dar;
	int context;

	if (REGION_ID(ea) != USER_REGION_ID)
		goto slb_bad_addr;

	/*
	 * Are we beyond what the page table layout supports?
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		goto slb_bad_addr;

	/* Lower addresses should have been handled by asm code. */
	if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
		goto slb_bad_addr;

	/*
	 * For user space, make sure we are within the allowed limit.
	 */
	if (ea >= current->mm->context.slb_addr_limit)
		goto slb_bad_addr;

	context = get_ea_context(&current->mm->context, ea);
	if (!context)
		goto slb_bad_addr;

	handle_multi_context_slb_miss(context, ea);
	exception_exit(prev_state);
	return;

slb_bad_addr:
	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
	else
		bad_page_fault(regs, ea, SIGSEGV);
	exception_exit(prev_state);
}