// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in some
 * other paths that we should never get here.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		regs_set_return_msr(&tsk->thread.ckpt_regs,
				    tsk->thread.regs->msr);
		set_thread_flag(TIF_RESTORE_TM);
	}
}

#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

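/*
 * When strict facility control is enabled (via the
 * "ppc_strict_facility_enable" boot parameter handled below), the facility
 * MSR bits (FP/VEC/VSX/SPE) are cleared again as soon as the kernel is done
 * using a facility, instead of being left enabled as an optimisation.
 */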
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  Once we have done the saving, we can
		 * copy the FP state to the thread_struct.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

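/* Bitmask of the facility MSR bits available on this CPU, filled in at boot. */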
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_all_available |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;

	return 0;
}
early_initcall(init_msr_all_available);

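/*
 * Give up all facility state (FP, VMX, VSX, SPE) that tsk still owns in the
 * CPU registers, flushing it to the thread_struct and clearing the
 * corresponding user MSR bits.
 */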
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_FPU
static bool should_restore_fp(void)
{
	if (current->thread.load_fp) {
		current->thread.load_fp++;
		return true;
	}
	return false;
}

static void do_restore_fp(void)
{
	load_fp_state(&current->thread.fp_state);
}
#else
static bool should_restore_fp(void) { return false; }
static void do_restore_fp(void) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static bool should_restore_altivec(void)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
		current->thread.load_vec++;
		return true;
	}
	return false;
}

static void do_restore_altivec(void)
{
	load_vr_state(&current->thread.vr_state);
	current->thread.used_vr = 1;
}
#else
static bool should_restore_altivec(void) { return false; }
static void do_restore_altivec(void) { }
#endif /* CONFIG_ALTIVEC */

static bool should_restore_vsx(void)
{
	if (cpu_has_feature(CPU_FTR_VSX))
		return true;
	return false;
}
#ifdef CONFIG_VSX
static void do_restore_vsx(void)
{
	current->thread.used_vsr = 1;
}
#else
static void do_restore_vsx(void) { }
#endif /* CONFIG_VSX */

/*
 * The exception exit path calls restore_math() with interrupts hard disabled
 * but the soft irq state not "reconciled". ftrace code that calls
 * local_irq_save/restore causes warnings.
 *
 * Rather than complicate the exit path, just don't trace restore_math. This
 * could be done by having ftrace entry code check for this un-reconciled
 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
 * temporarily fix it up for the duration of the ftrace call.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;
	unsigned long new_msr = 0;

	msr = regs->msr;

	/*
	 * new_msr tracks the facilities that are to be restored. Only reload
	 * if the bit is not set in the user MSR (if it is set, the registers
	 * are live for the user thread).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
		new_msr |= MSR_FP;

	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;

	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;
	}

	if (new_msr) {
		unsigned long fpexc_mode = 0;

		msr_check_and_set(new_msr);

		if (new_msr & MSR_FP) {
			do_restore_fp();

			// This also covers VSX, because VSX implies FP
			fpexc_mode = current->thread.fpexc_mode;
		}

		if (new_msr & MSR_VEC)
			do_restore_altivec();

		if (new_msr & MSR_VSX)
			do_restore_vsx();

		msr_check_and_clear(new_msr);

		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
	}
}
#endif /* CONFIG_PPC_BOOK3S_64 */

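/*
 * Save all facility register state of tsk into its thread_struct. Unlike
 * giveup_all(), FP and VMX state is saved without making the task give the
 * facilities up (save_fpu()/save_altivec() leave the user MSR bits set);
 * SPE has no save-only helper so its state is given up.
 */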
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */

static void do_break_handler(struct pt_regs *regs)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct arch_hw_breakpoint *info;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;
	int i;

	/*
	 * If underneath hw supports only one watchpoint, we know it
	 * caused exception. 8xx also falls into this category.
	 */
	if (nr_wp_slots() == 1) {
		__set_breakpoint(0, &null_brk);
		current->thread.hw_brk[0] = null_brk;
		current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
		return;
	}

	/* Otherwise find out which DAWR caused exception and disable it. */
	wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		info = &current->thread.hw_brk[i];
		if (!info->address)
			continue;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			__set_breakpoint(i, &null_brk);
			current->thread.hw_brk[i] = null_brk;
			current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
		}
	}
}

DEFINE_INTERRUPT_HANDLER(do_break)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/*
	 * We reach here only when watchpoint exception is generated by ptrace
	 * event (or hw is buggy!). Now if CONFIG_HAVE_HW_BREAKPOINT is
	 * enabled, hw_breakpoint_handler() will simply disable the watchpoint
	 * and send SIGTRAP. But if CONFIG_HAVE_HW_BREAKPOINT is disabled,
	 * we need to manually handle the watchpoint here.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
		do_break_handler(regs);

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(i, brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	int i;
	struct arch_hw_breakpoint null_brk = {0};

	for (i = 0; i < nr_wp_slots(); i++) {
		thread->hw_brk[i] = null_brk;
		if (ppc_breakpoint_available())
			set_breakpoint(i, &thread->hw_brk[i]);
	}
}

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;

	return true;
}

static void switch_hw_breakpoint(struct task_struct *new)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
					&new->thread.hw_brk[i])))
			continue;

		__set_breakpoint(i, &new->thread.hw_brk[i]);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

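/*
 * Program a DABR-style data breakpoint, either through a platform hook
 * (ppc_md.set_dabr) or by writing the DABR/DAC SPRs directly.
 */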
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		mtspr(SPRN_DAC1, dabr);
		if (IS_ENABLED(CONFIG_PPC_47x))
			isync();
		return 0;
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		mtspr(SPRN_DABR, dabr);
		if (cpu_has_feature(CPU_FTR_DABRX))
			mtspr(SPRN_DABRX, dabrx);
		return 0;
	} else {
		return -EINVAL;
	}
}

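/*
 * The 8xx has no DABR/DAWR; emulate a ranged data breakpoint using the
 * LCTRL1/LCTRL2 debug comparator registers instead.
 */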
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
{
	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
			       LCTRL1_CRWF_RW;
	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);

	if (start_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_F;
	else if (end_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_E;
	else
		lctrl2 |= LCTRL2_LW0LA_EandF;

	mtspr(SPRN_LCTRL2, 0);

	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
		return 0;

	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;

	mtspr(SPRN_CMPE, start_addr - 1);
	mtspr(SPRN_CMPF, end_addr);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}

void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));

	if (dawr_enabled())
		// Power8 or later
		set_dawr(nr, brk);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		set_breakpoint_8xx(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (dawr_enabled())
		return true; /* POWER8 DAWR or POWER9 forced DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: Everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Given that this is what we are expecting,
	 * it's fine.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in (newtask->thread.ck{fp,vr}_state and
	 * ckpt_regs.msr.
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no requirement to be checkpointed.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}

	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs_set_return_msr(regs, regs->msr | msr_diff);
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

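/*
 * Save the thread's context-switched SPRs (VRSAVE, SPEFSCR, DSCR and the
 * EBB/FSCR/TAR registers on Book3S-64) into the thread_struct before
 * switching away.
 */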
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		t->spefscr = mfspr(SPRN_SPEFSCR);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvmppc_save_user_regs(void)
{
	unsigned long usermsr;

	if (!current->thread.regs)
		return;

	usermsr = current->thread.regs->msr;

	if (usermsr & MSR_FP)
		save_fpu(current);

	if (usermsr & MSR_VEC)
		save_altivec(current);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (usermsr & MSR_TM) {
		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
		current->thread.regs->msr &= ~MSR_TM;
	}
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);

void kvmppc_save_current_sprs(void)
{
	save_sprs(&current->thread);
}
EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

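/*
 * Restore the context-switched SPRs for new_thread, writing an SPR only
 * when its value differs from the one old_thread was running with.
 */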
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE) &&
	    old_thread->spefscr != new_thread->spefscr)
		mtspr(SPRN_SPEFSCR, new_thread->spefscr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif

}

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_64S_HASH_MMU
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC_64S_HASH_MMU
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}

	/*
	 * On POWER9 the copy-paste buffer can only paste into
	 * foreign real addresses, so unprivileged processes can not
	 * see the data or use it in any way unless they have
	 * foreign real mappings. If the new process has the foreign
	 * real address mappings, we must issue a cp_abort to clear
	 * any state and prevent snooping, corruption or a covert
	 * channel. ISA v3.1 supports paste into local memory.
	 */
	if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
			atomic_read(&new->mm->context.vas_windows)))
		asm volatile(PPC_CP_ABORT);
#endif

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR/DAWR set/reset during context switch.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() and set_return_regs_changed() before calling
	 * _switch(). If we move it after _switch() then we miss out on calling
	 * it for new tasks. The reason for this is we manually create a stack
	 * frame for new tasks that directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	set_return_regs_changed(); /* _switch changes stack (and regs) */

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		kuap_assert_locked();

	last = _switch(old_thread, new_thread);

	/*
	 * Nothing after _switch will be run for newly created tasks,
	 * because they switch directly to ret_from_fork/ret_from_kernel_thread
	 * etc. Code added here should have a comment explaining why that is
	 * okay.
	 */

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
	/*
	 * This applies to a process that was context switched while inside
	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
	 * deactivated above, before _switch(). This will never be the case
	 * for new tasks.
	 */
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif

	/*
	 * Math facilities are masked out of the child MSR in copy_thread.
	 * A new task does not need to restore_math because it will
	 * demand fault them.
	 */
	if (current->thread.regs)
		restore_math(current->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

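/* Number of instructions dumped by show_instructions()/show_user_instructions(). */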
#define NR_INSN_TO_PRINT	16

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long nip = regs->nip;
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Instruction dump:");

	/*
	 * If we were executing with the MMU off for instructions, adjust pc
	 * rather than printing XXXXXXXX.
	 */
	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
		pc = (unsigned long)phys_to_virt(pc);
		nip = (unsigned long)phys_to_virt(regs->nip);
	}

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

		if (!__kernel_text_address(pc) ||
		    get_kernel_nofault(instr, (const void *)pc)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];	/* enough for 8 times 9 + 2 chars */

	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			if (copy_from_user_nofault(&instr, (void __user *)pc,
						   sizeof(instr))) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}

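/*
 * Tables mapping MSR bits to the names printed in register dumps, used by
 * print_msr_bits() below.
 */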
struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
	/*
	 * This only prints something if at least one of the TM bits is set.
	 * Inside the TM[], the output means:
	 *   E: Enabled		(bit 32)
	 *   S: Suspended	(bit 33)
	 *   T: Transactional	(bit 34)
	 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#endif

static void __show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR:  "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == INTERRUPT_MACHINE_CHECK ||
	    trap == INTERRUPT_DATA_STORAGE ||
	    trap == INTERRUPT_ALIGNMENT) {
		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
		else
			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
	}

#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
	}
	pr_cont("\n");
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	if (IS_ENABLED(CONFIG_KALLSYMS)) {
		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs);
	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
	if (!user_mode(regs))
		show_instructions(regs);
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void arch_setup_new_exec(void)
{

#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled())
		hash__setup_new_exec();
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_MEM_KEYS
	current->thread.regs->amr = default_amr;
	current->thread.regs->iamr = default_iamr;
#endif
}

#ifdef CONFIG_PPC64
/*
 * Assign a TIDR (thread ID) for task @t and set it in the thread
 * structure. For now, we only support setting TIDR for 'current' task.
 *
 * Since the TID value is a truncated form of its PID, it is possible
 * (but unlikely) for two threads to have the same TID. Users that
 * address threads by TIDR must be able to tolerate such collisions.
 *
 * TIDR is set at most once for a thread and is never cleared or
 * reassigned afterwards.
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);

#endif /* CONFIG_PPC64 */

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coherent state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to()
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_fork_scv(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int i;
#endif

	klp_init_thread_info(p);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		/* 64s sets this in ret_from_fork */
		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
			childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
			if (!is_32bit_task())
				childregs->gpr[13] = tls;
			else
				childregs->gpr[2] = tls;
		}

		if (trap_is_scv(regs))
			f = ret_from_fork_scv;
		else
			f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	for (i = 0; i < nr_wp_slots(); i++)
		p->thread.ptrace_bps[i] = NULL;
#endif

#ifdef CONFIG_PPC_FPU_REGS
	p->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	p->thread.kuap = KUAP_NONE;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	p->thread.pid = MMU_NO_CONTEXT;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;

	p->thread.tidr = 0;
#endif
	/*
	 * Run with the current AMR value of the kernel
	 */
#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		kregs->amr = AMR_KUAP_BLOCKED;

	if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
		kregs->iamr = AMR_KUEP_BLOCKED;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}

void preload_new_slb_context(unsigned long start, unsigned long sp);

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		preload_new_slb_context(start, sp);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   determine the TOC pointer value to use).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated symbol, to which a function
			 * descriptor points (ELFv1 ABI). Load the entry
			 * point and TOC pointer from the descriptor.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs_set_return_ip(regs, entry);
		regs_set_return_msr(regs, MSR_USER64);
	} else {
		regs->gpr[2] = 0;
		regs_set_return_ip(regs, start);
		regs_set_return_msr(regs, MSR_USER32);
	}

#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_slb = 0;
	current->thread.load_fp = 0;
#ifdef CONFIG_PPC_FPU_REGS
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled  processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (asyn, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * Read SPEFSCR into spefscr_last so that the
			 * software-managed exception state is based on the
			 * value userspace last saw, and later kernel
			 * updates of the sticky exception bits are merged
			 * rather than lost.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#endif
			return 0;
		} else {
			return -EINVAL;
		}
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[TM] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0) {
		regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
						| tsk->thread.fpexc_mode);
	}
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val = 0;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * As above, read SPEFSCR first so that
			 * spefscr_last is in sync with the hardware before
			 * the software-managed exception mode is reported.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
#endif
		} else
			return -EINVAL;
	} else {
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	}
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs_set_return_msr(regs, regs->msr & ~MSR_LE);
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs_set_return_msr(regs, regs->msr | MSR_LE);
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

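/*
 * Helpers for validate_sp(): check whether sp has at least nbytes of room
 * inside one of the per-CPU interrupt or emergency stacks.
 */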
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	stack_page = (unsigned long)hardirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)softirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return 0;
}

static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
					unsigned long nbytes)
{
#ifdef CONFIG_PPC64
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	if (!paca_ptrs)
		return 0;

	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

# ifdef CONFIG_PPC_BOOK3S_64
	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
# endif
#endif

	return 0;
}

/*
 * Check that sp can hold a stack frame of nbytes on p's kernel, irq or
 * emergency stack.
 */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp < THREAD_SIZE)
		return 0;

	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	if (valid_irq_stack(sp, p, nbytes))
		return 1;

	return valid_emergency_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

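/*
 * Walk p's kernel stack to find the first return address outside the
 * scheduler, for /proc/<pid>/wchan. The walk is bounded at 16 frames.
 */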
static unsigned long ___get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    task_is_running(p))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long ret;

	if (!try_get_task_stack(p))
		return 0;

	ret = ___get_wchan(p);

	put_task_stack(p);

	return ret;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void __no_sanitize_address show_stack(struct task_struct *tsk,
				      unsigned long *stack,
				      const char *loglvl)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
	unsigned long ret_addr;
	int ftrace_idx = 0;

	if (tsk == NULL)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_frame();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("%sCall Trace:\n", loglvl);
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			break;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("%s["REG"] ["REG"] %pS",
				loglvl, sp, ip, (void *)ip);
			ret_addr = ftrace_graph_ret_addr(current,
						&ftrace_idx, ip, stack);
			if (ret_addr != ip)
				pr_cont(" (%pS)", (void *)ret_addr);
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);

			lr = regs->link;
			printk("%s--- interrupt: %lx at %pS\n",
			       loglvl, regs->trap, (void *)regs->nip);
			__show_regs(regs);
			printk("%s--- interrupt: %lx\n",
			       loglvl, regs->trap);

			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);

	put_task_stack(tsk);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

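/* Pick a random page-aligned offset for the heap start, used by arch_randomize_brk(). */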
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}