1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/sched/debug.h>
16#include <linux/sched/task.h>
17#include <linux/sched/task_stack.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/elf.h>
27#include <linux/prctl.h>
28#include <linux/init_task.h>
29#include <linux/export.h>
30#include <linux/kallsyms.h>
31#include <linux/mqueue.h>
32#include <linux/hardirq.h>
33#include <linux/utsname.h>
34#include <linux/ftrace.h>
35#include <linux/kernel_stat.h>
36#include <linux/personality.h>
37#include <linux/random.h>
38#include <linux/hw_breakpoint.h>
39#include <linux/uaccess.h>
40#include <linux/elf-randomize.h>
41#include <linux/pkeys.h>
42#include <linux/seq_buf.h>
43
44#include <asm/interrupt.h>
45#include <asm/io.h>
46#include <asm/processor.h>
47#include <asm/mmu.h>
48#include <asm/prom.h>
49#include <asm/machdep.h>
50#include <asm/time.h>
51#include <asm/runlatch.h>
52#include <asm/syscalls.h>
53#include <asm/switch_to.h>
54#include <asm/tm.h>
55#include <asm/debug.h>
56#ifdef CONFIG_PPC64
57#include <asm/firmware.h>
58#include <asm/hw_irq.h>
59#endif
60#include <asm/code-patching.h>
61#include <asm/exec.h>
62#include <asm/livepatch.h>
63#include <asm/cpu_has_feature.h>
64#include <asm/asm-prototypes.h>
65#include <asm/stacktrace.h>
66#include <asm/hw_breakpoint.h>
67
68#include <linux/kprobes.h>
69#include <linux/kdebug.h>
70
71
72#ifdef TM_DEBUG_SW
73#define TM_DEBUG(x...) printk(KERN_INFO x)
74#else
75#define TM_DEBUG(x...) do { } while(0)
76#endif
77
78extern unsigned long _get_SP(void);
79
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Set when a TM-suspend-disabled CPU must not run suspended transactions;
 * fixed up once at boot, hence __ro_after_init.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		regs_set_return_msr(&tsk->thread.ckpt_regs,
						tsk->thread.regs->msr);
		set_thread_flag(TIF_RESTORE_TM);
	}
}

#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
108
/*
 * When true, the facility-enable shortcuts are disabled and each facility's
 * MSR bit is managed strictly; set via "ppc_strict_facility_enable" on the
 * kernel command line.
 */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

/* early_param handler: turn on strict facility (MSR bit) control. */
static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
120
121
/*
 * Set @bits in the kernel MSR (and MSR_VSX too if FP is requested on a
 * VSX-capable CPU, since FP without VSX is invalid there).  Returns the
 * resulting MSR value.  notrace: called from low-level paths where ftrace
 * must not run.
 */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	/* mtmsr is expensive/serialising - only write if something changed */
	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);
138
139
/*
 * Clear @bits in the kernel MSR (and MSR_VSX too when FP is being cleared
 * on a VSX-capable CPU).  Counterpart of msr_check_and_set().
 */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	/* Skip the serialising mtmsr when nothing actually changed */
	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
154
#ifdef CONFIG_PPC_FPU
/*
 * Save the FP registers to the thread struct and clear FP ownership from
 * the task's user MSR.  Caller must have FP enabled in the kernel MSR.
 */
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	/* Giving up FP also invalidates any live VSX state */
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

/* Save @tsk's FP state, temporarily enabling FP in the kernel MSR. */
void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch, there is
			 * something wrong if a stopped child appears to
			 * still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

/* Enable FP in the kernel MSR so in-kernel FP code may run. */
void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
#else
/* No FPU configured: nothing to give up. */
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */
237
#ifdef CONFIG_ALTIVEC
/*
 * Save the VMX registers to the thread struct and clear VEC ownership
 * from the task's user MSR.  Caller must have VEC enabled in the kernel MSR.
 */
static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	/* Giving up VMX also invalidates any live VSX state */
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

/* Save @tsk's VMX state, temporarily enabling VEC in the kernel MSR. */
void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

/* Enable VMX in the kernel MSR so in-kernel vector code may run. */
void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* See flush_fp_to_thread() for the preempt rationale */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
303
#ifdef CONFIG_VSX
/*
 * VSX state is the union of FP and VMX state, so giving up VSX means
 * giving up both.  Caller must have FP/VEC/VSX enabled in the kernel MSR.
 */
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC.
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

/* Save @tsk's VSX (FP+VMX) state with the facilities temporarily enabled. */
static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

/* Enable VSX in the kernel MSR so in-kernel VSX code may run. */
void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		     MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

/*
 * Make sure the VSX register state in the thread_struct is up to date
 * for task tsk.
 */
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* See flush_fp_to_thread() for the preempt rationale */
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
370
#ifdef CONFIG_SPE
/* Save @tsk's SPE state, temporarily enabling SPE in the kernel MSR. */
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

/* Enable SPE in the kernel MSR so in-kernel SPE code may run. */
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

/*
 * Make sure the SPE register state in the thread_struct is up to date
 * for task tsk.
 */
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* See flush_fp_to_thread() for the preempt rationale */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			/* SPEFSCR lives in an SPR, save it alongside */
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
408
/* Mask of all facility MSR bits this machine supports; built once at boot. */
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_all_available |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;

	return 0;
}
early_initcall(init_msr_all_available);
425
/*
 * Give up every math facility (FP/VMX/SPE; VSX implicitly via FP+VMX)
 * that @tsk currently owns, saving the register state into its
 * thread_struct.  No-op if the task owns none of them.
 */
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	/* MSR_VSX must never be set without MSR_FP and MSR_VEC */
	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
454
455#ifdef CONFIG_PPC_BOOK3S_64
456#ifdef CONFIG_PPC_FPU
457static bool should_restore_fp(void)
458{
459 if (current->thread.load_fp) {
460 current->thread.load_fp++;
461 return true;
462 }
463 return false;
464}
465
466static void do_restore_fp(void)
467{
468 load_fp_state(¤t->thread.fp_state);
469}
470#else
471static bool should_restore_fp(void) { return false; }
472static void do_restore_fp(void) { }
473#endif
474
475#ifdef CONFIG_ALTIVEC
476static bool should_restore_altivec(void)
477{
478 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
479 current->thread.load_vec++;
480 return true;
481 }
482 return false;
483}
484
485static void do_restore_altivec(void)
486{
487 load_vr_state(¤t->thread.vr_state);
488 current->thread.used_vr = 1;
489}
490#else
491static bool should_restore_altivec(void) { return false; }
492static void do_restore_altivec(void) { }
493#endif
494
495static bool should_restore_vsx(void)
496{
497 if (cpu_has_feature(CPU_FTR_VSX))
498 return true;
499 return false;
500}
501#ifdef CONFIG_VSX
502static void do_restore_vsx(void)
503{
504 current->thread.used_vsr = 1;
505}
506#else
507static void do_restore_vsx(void) { }
508#endif
509
510
511
512
513
514
515
516
517
518
519
/*
 * Reload any math register state (FP/VMX/VSX) that userspace will need on
 * return from the kernel, and set the corresponding MSR bits in @regs.
 * notrace: runs on the interrupt-exit path with irqs hard-disabled, where
 * ftrace must not be entered.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;
	unsigned long new_msr = 0;

	msr = regs->msr;

	/*
	 * new_msr tracks the facilities that are to be restored. Only reload
	 * if the bit is not already set in the user MSR (a set bit means the
	 * registers are already live).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
		new_msr |= MSR_FP;

	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;

	/* VSX may only be enabled once FP and VEC both are (or will be) */
	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;
	}

	if (new_msr) {
		unsigned long fpexc_mode = 0;

		msr_check_and_set(new_msr);

		if (new_msr & MSR_FP) {
			do_restore_fp();

			/* Also restore the user's FP exception mode bits */
			fpexc_mode = current->thread.fpexc_mode;
		}

		if (new_msr & MSR_VEC)
			do_restore_altivec();

		if (new_msr & MSR_VSX)
			do_restore_vsx();

		msr_check_and_clear(new_msr);

		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
	}
}
566#endif
567
/*
 * Save every math facility's register state (FP/VMX/SPE) that @tsk owns
 * into its thread_struct without clearing the task's ownership (MSR bits
 * in the user regs are left alone, unlike giveup_all()).
 */
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	/* MSR_VSX must never be set without MSR_FP and MSR_VEC */
	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	/* SPE has no pure "save", so give it up entirely */
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
595
/*
 * Flush all of @tsk's math register state to its thread_struct so it can
 * be examined/modified (e.g. by ptrace or on fork).  Must be current.
 */
void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		/* SPEFSCR lives in an SPR, save it alongside */
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);
611
612#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Report a debug (DAC/IAC) breakpoint hit to the debugger: first via the
 * die notifier chain, then as a ptrace SIGTRAP carrying the slot number.
 */
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt,
				    (void __user *)address);
}
625#else
626
627static void do_break_handler(struct pt_regs *regs)
628{
629 struct arch_hw_breakpoint null_brk = {0};
630 struct arch_hw_breakpoint *info;
631 struct ppc_inst instr = ppc_inst(0);
632 int type = 0;
633 int size = 0;
634 unsigned long ea;
635 int i;
636
637
638
639
640
641 if (nr_wp_slots() == 1) {
642 __set_breakpoint(0, &null_brk);
643 current->thread.hw_brk[0] = null_brk;
644 current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
645 return;
646 }
647
648
649 wp_get_instr_detail(regs, &instr, &type, &size, &ea);
650
651 for (i = 0; i < nr_wp_slots(); i++) {
652 info = ¤t->thread.hw_brk[i];
653 if (!info->address)
654 continue;
655
656 if (wp_check_constraints(regs, instr, ea, type, size, info)) {
657 __set_breakpoint(i, &null_brk);
658 current->thread.hw_brk[i] = null_brk;
659 current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
660 }
661 }
662}
663
/* Data-breakpoint (DAWR/DABR) exception handler. */
DEFINE_INTERRUPT_HANDLER(do_break)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/*
	 * We reach here only when a watchpoint exception is generated by a
	 * ptrace event (or hw is buggy!). With CONFIG_HAVE_HW_BREAKPOINT the
	 * watchpoint is already handled by hw_breakpoint_handler() so there
	 * is nothing to do; otherwise we must handle (disable) it here.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
		do_break_handler(regs);

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
687#endif
688
/* Per-CPU shadow of the breakpoint values currently programmed in hardware */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
690
691#ifdef CONFIG_PPC_ADV_DEBUG_REGS
692
693
694
/*
 * Reset a thread's BookE debug register image to its power-on defaults
 * (all address/value compares cleared, events routed to user mode).
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
721
/* Load a full set of BookE debug register values into the hardware. */
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we do the debug register writes
	 * so a debug event cannot fire mid-update.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
749
750
751
752
753
/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
761#else
762#ifndef CONFIG_HAVE_HW_BREAKPOINT
/* Program breakpoint slot @i, holding off preemption across the HW write. */
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(i, brk);
	preempt_enable();
}
769
/* Clear all of a thread's hardware watchpoint slots (non-BookE variant). */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	int i;
	struct arch_hw_breakpoint null_brk = {0};

	for (i = 0; i < nr_wp_slots(); i++) {
		thread->hw_brk[i] = null_brk;
		/* Only touch hardware if this platform actually has a BP */
		if (ppc_breakpoint_available())
			set_breakpoint(i, &thread->hw_brk[i]);
	}
}
781
782static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
783 struct arch_hw_breakpoint *b)
784{
785 if (a->address != b->address)
786 return false;
787 if (a->type != b->type)
788 return false;
789 if (a->len != b->len)
790 return false;
791
792 return true;
793}
794
795static void switch_hw_breakpoint(struct task_struct *new)
796{
797 int i;
798
799 for (i = 0; i < nr_wp_slots(); i++) {
800 if (likely(hw_brk_match(this_cpu_ptr(¤t_brk[i]),
801 &new->thread.hw_brk[i])))
802 continue;
803
804 __set_breakpoint(i, &new->thread.hw_brk[i]);
805 }
806}
807#endif
808#endif
809
/*
 * Program a DABR-style (pre-POWER8) data breakpoint.  Returns 0 on
 * success or -EINVAL if this platform has no way to set a DABR.
 */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	/* Low bits of DABR encode the read/write enables */
	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	/* Let the platform (e.g. hypervisor call) do it if it wants to */
	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		mtspr(SPRN_DAC1, dabr);
		if (IS_ENABLED(CONFIG_PPC_47x))
			isync();
		return 0;
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		mtspr(SPRN_DABR, dabr);
		if (cpu_has_feature(CPU_FTR_DABRX))
			mtspr(SPRN_DABRX, dabrx);
		return 0;
	} else {
		return -EINVAL;
	}
}
834
/*
 * Program an 8xx data breakpoint using the LCTRL1/LCTRL2 + CMPE/CMPF
 * compare registers.  The watched range is [start_addr, end_addr).
 */
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
{
	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
			       LCTRL1_CRWF_RW;
	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);

	/* Degenerate ranges at address 0 need one-sided compares */
	if (start_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_F;
	else if (end_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_E;
	else
		lctrl2 |= LCTRL2_LW0LA_EandF;

	/* Disable the watchpoint while we reprogram the compare registers */
	mtspr(SPRN_LCTRL2, 0);

	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
		return 0;

	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;

	mtspr(SPRN_CMPE, start_addr - 1);
	mtspr(SPRN_CMPF, end_addr);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}
867
868void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
869{
870 memcpy(this_cpu_ptr(¤t_brk[nr]), brk, sizeof(*brk));
871
872 if (dawr_enabled())
873
874 set_dawr(nr, brk);
875 else if (IS_ENABLED(CONFIG_PPC_8xx))
876 set_breakpoint_8xx(brk);
877 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
878
879 set_dabr(brk);
880 else
881
882 WARN_ON_ONCE(1);
883}
884
885
886bool ppc_breakpoint_available(void)
887{
888 if (dawr_enabled())
889 return true;
890 if (cpu_has_feature(CPU_FTR_ARCH_207S))
891 return false;
892
893 return true;
894}
895EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
896
897#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
898
/* Does @tsk have transactional memory enabled in its user MSR? */
static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}
903
/*
 * Reclaim the transactional state of @thr into its checkpointed save area,
 * aborting the transaction with @cause.  Safe to call when the state has
 * already been reclaimed (it then does nothing).
 */
static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.  On signal delivery we'd normally
	 * reclaim to obtain the stack pointer and return to userspace
	 * without going through __switch_to(); but if the stack frame is
	 * bad we exit the thread, which calls __switch_to() and would
	 * attempt to reclaim the already-saved TM state again.  Checking
	 * the live MSR (rather than a thread_struct flag) also catches a
	 * potential TM Bad Thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}
947
948void tm_reclaim_current(uint8_t cause)
949{
950 tm_enable();
951 tm_reclaim_thread(¤t->thread, cause);
952}
953
/*
 * Called on context switch out: if @tsk is mid-transaction, reclaim its
 * transactional state (with TM_CAUSE_RESCHED); in all cases save the TM
 * SPRs so they can be restored when the task runs again.
 */
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/*
	 * We need to save the registers of a task that is about to be
	 * switched out, in case it resumes its transaction later.  The
	 * checkpointed (pre-transaction) register state lives in the
	 * ckpt_regs/ckfp/ckvr save areas after reclaim.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/*
	 * Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM SPRs.
	 */
	tm_save_sprs(thr);
}
995
extern void __tm_recheckpoint(struct thread_struct *thread);

/*
 * Reload a thread's checkpointed register state into the CPU so the
 * transaction can be resumed (and re-aborted/rolled back by hardware).
 */
void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/*
	 * We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/*
	 * The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}
1021
/*
 * Called on context switch in: if the incoming task was mid-transaction,
 * recheckpoint its state so the transaction resumes; otherwise just
 * restore its TM SPRs.
 */
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/*
	 * Recheckpoint the registers of a task getting scheduled in,
	 * if it was in a transaction when switched out.  The math
	 * registers checkpointed by the trecheckpoint must match the
	 * user MSR, so MSR_{FP,VEC,VSX} are cleared below to force a
	 * lazy restore of the live (transactional) values.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
		tm_restore_sprs(&new->thread);
		return;
	}

	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, so ensure the math registers get loaded lazily on return
	 * to userspace.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
1059
/*
 * TM half of the context switch: reclaim the outgoing task's transactional
 * state and recheckpoint the incoming task's.  Also drops MSR_TM from a
 * task once its load_tm counter decays to zero so TM lazily turns off.
 */
static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			/* load_tm wrapped to 0: lazily disable TM for prev */
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
/*
 * Called on exception exit when TIF_RESTORE_TM is set: re-enable in the
 * user MSR the math facilities that were live when the transaction
 * started (recorded in ckpt_regs.msr) and reload their register state.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs_set_return_msr(regs, regs->msr | msr_diff);
}
1120
#else
/* Stubs for kernels built without transactional memory support */
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1126
/* Save the per-thread SPRs of the outgoing task on context switch. */
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		t->spefscr = mfspr(SPRN_SPEFSCR);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}
1158
/*
 * Load the incoming task's per-thread SPRs, skipping any register whose
 * value matches what the outgoing task already had (SPR writes are slow).
 */
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE) &&
	    old_thread->spefscr != new_thread->spefscr)
		mtspr(SPRN_SPEFSCR, new_thread->spefscr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		/* Threads that never set a DSCR get the system default */
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif

}
1203
1204struct task_struct *__switch_to(struct task_struct *prev,
1205 struct task_struct *new)
1206{
1207 struct thread_struct *new_thread, *old_thread;
1208 struct task_struct *last;
1209#ifdef CONFIG_PPC_BOOK3S_64
1210 struct ppc64_tlb_batch *batch;
1211#endif
1212
1213 new_thread = &new->thread;
1214 old_thread = ¤t->thread;
1215
1216 WARN_ON(!irqs_disabled());
1217
1218#ifdef CONFIG_PPC_BOOK3S_64
1219 batch = this_cpu_ptr(&ppc64_tlb_batch);
1220 if (batch->active) {
1221 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1222 if (batch->index)
1223 __flush_tlb_pending(batch);
1224 batch->active = 0;
1225 }
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236 if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
1237 atomic_read(&new->mm->context.vas_windows)))
1238 asm volatile(PPC_CP_ABORT);
1239#endif
1240
1241#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1242 switch_booke_debug_regs(&new->thread.debug);
1243#else
1244
1245
1246
1247
1248#ifndef CONFIG_HAVE_HW_BREAKPOINT
1249 switch_hw_breakpoint(new);
1250#endif
1251#endif
1252
1253
1254
1255
1256
1257 save_sprs(&prev->thread);
1258
1259
1260 giveup_all(prev);
1261
1262 __switch_to_tm(prev, new);
1263
1264 if (!radix_enabled()) {
1265
1266
1267
1268
1269
1270 hard_irq_disable();
1271 }
1272
1273
1274
1275
1276
1277
1278
1279
1280 restore_sprs(old_thread, new_thread);
1281
1282 set_return_regs_changed();
1283
1284#ifdef CONFIG_PPC32
1285 kuap_assert_locked();
1286#endif
1287 last = _switch(old_thread, new_thread);
1288
1289
1290
1291
1292
1293
1294
1295
1296#ifdef CONFIG_PPC_BOOK3S_64
1297
1298
1299
1300
1301
1302
1303 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1304 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1305 batch = this_cpu_ptr(&ppc64_tlb_batch);
1306 batch->active = 1;
1307 }
1308
1309
1310
1311
1312
1313
1314 if (current->thread.regs)
1315 restore_math(current->thread.regs);
1316#endif
1317
1318 return last;
1319}
1320
1321#define NR_INSN_TO_PRINT 16
1322
/*
 * Dump the instructions around regs->nip for an oops, marking the
 * faulting instruction with <...>.
 */
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long nip = regs->nip;
	/* Start the dump three quarters of the window before NIP */
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Instruction dump:");

	/*
	 * If we were executing with the MMU off for instructions, adjust pc
	 * rather than printing XXXXXXXX.
	 */
	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
		pc = (unsigned long)phys_to_virt(pc);
		nip = (unsigned long)phys_to_virt(regs->nip);
	}

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

		/* Read safely: the address may be bogus */
		if (!__kernel_text_address(pc) ||
		    get_kernel_nofault(instr, (const void *)pc)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}
1361
/*
 * Dump the userspace instructions around regs->nip (e.g. on an unhandled
 * signal), eight per line, marking the faulting instruction with <...>.
 */
void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];	/* enough for 8 times 9 + 2 chars */

	/* Start the dump three quarters of the window before NIP */
	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			/* User memory: must use a fault-safe copy */
			if (copy_from_user_nofault(&instr, (void __user *)pc,
						   sizeof(instr))) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}
1394
/* A single named MSR bit, for the human-readable register dump. */
struct regbit {
	unsigned long bit;
	const char *name;
};

/* MSR bits to decode in show_regs() output, most significant first. */
static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};
1429
1430static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1431{
1432 const char *s = "";
1433
1434 for (; bits->bit; ++bits)
1435 if (val & bits->bit) {
1436 pr_cont("%s%s", s, bits->name);
1437 s = sep;
1438 }
1439}
1440
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* TM-related MSR bits: transaction state (T/S) and TM enable (E). */
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bit is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1467
/* Print a decoded MSR as "<bit,bit,...>" including any TM bits. */
static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}
1475
1476#ifdef CONFIG_PPC64
1477#define REG "%016lx"
1478#define REGS_PER_LINE 4
1479#else
1480#define REG "%08lx"
1481#define REGS_PER_LINE 8
1482#endif
1483
/* Print the register state in @regs for an oops/show_regs dump. */
static void __show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP:  "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR:  "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	/* NOTE(review): on syscall entry CFAR is saved in the orig_gpr3 slot */
	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == INTERRUPT_MACHINE_CHECK ||
	    trap == INTERRUPT_DATA_STORAGE ||
	    trap == INTERRUPT_ALIGNMENT) {
		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
		else
			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
	}

#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
	}
	pr_cont("\n");
	/*
	 * Lookup NIP late so we have the best change of getting the
	 * above info out without failing
	 */
	if (IS_ENABLED(CONFIG_KALLSYMS)) {
		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
	}
}
1530
/*
 * Top-level register dump: generic banner, the register state itself,
 * a stack backtrace, and — for kernel-mode faults — the instructions
 * around the faulting NIP.
 */
void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs);
	/* r1 is the stack pointer on powerpc. */
	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
	if (!user_mode(regs))
		show_instructions(regs);
}
1539
1540void flush_thread(void)
1541{
1542#ifdef CONFIG_HAVE_HW_BREAKPOINT
1543 flush_ptrace_hw_breakpoint(current);
1544#else
1545 set_debug_reg_defaults(¤t->thread);
1546#endif
1547}
1548
/*
 * Arch hook run during exec, after the new mm has been installed.
 */
void arch_setup_new_exec(void)
{
	/* Reset hash-MMU per-process state for the new image. */
#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled())
		hash__setup_new_exec();
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not
	 * be set.  Point it at the pt_regs area at the top of the
	 * kernel stack.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	/* Start the new image with the default protection-key registers. */
#ifdef CONFIG_PPC_MEM_KEYS
	current->thread.regs->amr = default_amr;
	current->thread.regs->iamr = default_iamr;
#endif
}
1570
1571#ifdef CONFIG_PPC64
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605int set_thread_tidr(struct task_struct *t)
1606{
1607 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1608 return -EINVAL;
1609
1610 if (t != current)
1611 return -EINVAL;
1612
1613 if (t->thread.tidr)
1614 return 0;
1615
1616 t->thread.tidr = (u16)task_pid_nr(t);
1617 mtspr(SPRN_TIDR, t->thread.tidr);
1618
1619 return 0;
1620}
1621EXPORT_SYMBOL_GPL(set_thread_tidr);
1622
1623#endif
1624
/* No arch-specific teardown is required when a task's thread is released. */
void
release_thread(struct task_struct *t)
{
}
1629
1630
1631
1632
1633
/*
 * Arch part of duplicating a task_struct at fork time: flush the
 * parent's live register state into src->thread so the structure copy
 * below captures a consistent snapshot.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* Push FP/VMX/VSX register state out to src->thread. */
	flush_all_to_thread(src);

	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does
	 * this flush but it removes the checkpointed state from the
	 * current CPU and transitions the CPU out of TM mode.  Hence we
	 * need to call tm_recheckpoint_new_task() (on the same task) to
	 * restore the checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready.  Doesn't matter,
	 * passing dst is only important for __switch_to().
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	/* Reset event-based-branch state in the copy — see clear_task_ebb(). */
	clear_task_ebb(dst);

	return 0;
}
1655
/*
 * Precompute the SLB VSID covering the new task's kernel stack so the
 * context-switch path can insert it cheaply.  Only relevant for the
 * hash MMU on Book3S-64; a no-op under radix or on other platforms.
 */
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
	unsigned long vsid;

	if (radix_enabled())
		return;

	/* Pick the segment size the kernel linear mapping uses. */
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) << SLB_VSID_SHIFT_1T;
	else
		vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT;

	p->thread.ksp_vsid = vsid | SLB_VSID_KERNEL | llp;
#endif
}
1675
1676
1677
1678
1679
1680
1681
1682
/*
 * Copy architecture-specific thread state for fork/clone.
 *
 * Lays out the child's kernel stack from the top down: a pt_regs frame
 * for user state (childregs), a zero back-chain word, then a second
 * pt_regs frame (kregs) whose nip is where the scheduler will "return"
 * to when the task first runs.
 *
 * @clone_flags: clone(2) flags; CLONE_SETTLS is honoured here.
 * @usp:         new user stack pointer, or the function to run for a
 *               kernel thread.
 * @kthread_arg: argument passed to a kernel thread's function.
 * @p:           the task being created.
 * @tls:         TLS pointer, used when CLONE_SETTLS is set.
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p,
		unsigned long tls)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_fork_scv(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int i;
#endif

	klp_init_thread_info(p);

	/* Carve the user register frame off the top of the stack. */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
		/* kernel thread: no user state to copy */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function to run is carried in a non-volatile GPR (r14) */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = kthread_arg;	/* its argument, in r15 */
		p->thread.regs = NULL;	/* no user register context */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread: start from a copy of the parent's registers */
		struct pt_regs *regs = current_pt_regs();
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		/* fork() returns 0 in the child (r3); 64s does this elsewhere */
		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
			childregs->gpr[3] = 0;
		if (clone_flags & CLONE_SETTLS) {
			/* TLS pointer lives in r13 (64-bit) or r2 (32-bit) */
			if (!is_32bit_task())
				childregs->gpr[13] = tls;
			else
				childregs->gpr[2] = tls;
		}
		/* scv system calls exit through a different path */
		if (trap_is_scv(regs))
			f = ret_from_fork_scv;
		else
			f = ret_from_fork;
	}
	/* child starts without the FP/VMX/VSX facilities enabled */
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * At some point in the future some task will call _switch() to
	 * switch to this new task, which pops the frame built below and
	 * starts execution at f (ret_from_fork and friends).  Zero the
	 * back chain here so stack walkers stop at this frame.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	/* the child does not inherit ptrace hardware breakpoints */
	for (i = 0; i < nr_wp_slots(); i++)
		p->thread.ptrace_bps[i] = NULL;
#endif

#ifdef CONFIG_PPC_FPU_REGS
	p->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	p->thread.kuap = KUAP_NONE;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;

	p->thread.tidr = 0;	/* assigned lazily by set_thread_tidr() */
#endif

	/*
	 * Start the kernel frame with AMR/IAMR in their most
	 * restrictive ("blocked") settings.
	 */
#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		kregs->amr = AMR_KUAP_BLOCKED;

	if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
		kregs->iamr = AMR_KUEP_BLOCKED;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
1796
1797void preload_new_slb_context(unsigned long start, unsigned long sp);
1798
1799
1800
1801
1802void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1803{
1804#ifdef CONFIG_PPC64
1805 unsigned long load_addr = regs->gpr[2];
1806
1807 if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
1808 preload_new_slb_context(start, sp);
1809#endif
1810
1811#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1812
1813
1814
1815
1816
1817 if (MSR_TM_SUSPENDED(mfmsr()))
1818 tm_reclaim_current(0);
1819#endif
1820
1821 memset(regs->gpr, 0, sizeof(regs->gpr));
1822 regs->ctr = 0;
1823 regs->link = 0;
1824 regs->xer = 0;
1825 regs->ccr = 0;
1826 regs->gpr[1] = sp;
1827
1828#ifdef CONFIG_PPC32
1829 regs->mq = 0;
1830 regs->nip = start;
1831 regs->msr = MSR_USER;
1832#else
1833 if (!is_32bit_task()) {
1834 unsigned long entry;
1835
1836 if (is_elf2_task()) {
1837
1838 entry = start;
1839
1840
1841
1842
1843
1844
1845
1846
1847
1848 regs->gpr[12] = start;
1849
1850 set_thread_flag(TIF_RESTOREALL);
1851 } else {
1852 unsigned long toc;
1853
1854
1855
1856
1857
1858
1859
1860 __get_user(entry, (unsigned long __user *)start);
1861 __get_user(toc, (unsigned long __user *)start+1);
1862
1863
1864
1865
1866 if (load_addr != 0) {
1867 entry += load_addr;
1868 toc += load_addr;
1869 }
1870 regs->gpr[2] = toc;
1871 }
1872 regs_set_return_ip(regs, entry);
1873 regs_set_return_msr(regs, MSR_USER64);
1874 } else {
1875 regs->gpr[2] = 0;
1876 regs_set_return_ip(regs, start);
1877 regs_set_return_msr(regs, MSR_USER32);
1878 }
1879
1880#endif
1881#ifdef CONFIG_VSX
1882 current->thread.used_vsr = 0;
1883#endif
1884 current->thread.load_slb = 0;
1885 current->thread.load_fp = 0;
1886#ifdef CONFIG_PPC_FPU_REGS
1887 memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state));
1888 current->thread.fp_save_area = NULL;
1889#endif
1890#ifdef CONFIG_ALTIVEC
1891 memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state));
1892 current->thread.vr_state.vscr.u[3] = 0x00010000;
1893 current->thread.vr_save_area = NULL;
1894 current->thread.vrsave = 0;
1895 current->thread.used_vr = 0;
1896 current->thread.load_vec = 0;
1897#endif
1898#ifdef CONFIG_SPE
1899 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1900 current->thread.acc = 0;
1901 current->thread.spefscr = 0;
1902 current->thread.used_spe = 0;
1903#endif
1904#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1905 current->thread.tm_tfhar = 0;
1906 current->thread.tm_texasr = 0;
1907 current->thread.tm_tfiar = 0;
1908 current->thread.load_tm = 0;
1909#endif
1910}
1911EXPORT_SYMBOL(start_thread);
1912
1913#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1914 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1915
/*
 * prctl(PR_SET_FPEXC) back end: set the floating-point exception mode
 * for @tsk.  Returns 0 on success, -EINVAL for unsupported modes.
 */
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/*
	 * On SPE processors the IEEE exception enable flags are kept
	 * software-side in fpexc_mode; on classic FP, fpexc_mode holds
	 * the MSR FE0/FE1 exception mode (disabled/async/precise).
	 */
	if (val & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * Refresh spefscr_last from the hardware first,
			 * then store only the software-enable flag plus
			 * the individual exception-enable bits.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#endif
			return 0;
		} else {
			return -EINVAL;
		}
	}

	/*
	 * Classic FP path: anything beyond PR_FP_EXC_PRECISE is not a
	 * valid mode.  Pack the mode into the MSR FE0/FE1 encoding and,
	 * if the FP bit is currently set in the task's MSR, update the
	 * live MSR as well so the change takes effect immediately.
	 */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0) {
		regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
						| tsk->thread.fpexc_mode);
	}
	return 0;
}
1963
/*
 * prctl(PR_GET_FPEXC) back end: copy @tsk's floating-point exception
 * mode to the user address @adr.  Returns 0, -EINVAL, or the
 * put_user() fault result.
 */
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val = 0;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * Software-managed (SPE) mode: refresh
			 * spefscr_last from the hardware and report the
			 * stored fpexc_mode bits as-is.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
#endif
		} else
			return -EINVAL;
	} else {
		/* Classic FP: unpack the MSR FE0/FE1 encoding. */
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	}
	return put_user(val, (unsigned int __user *) adr);
}
1993
1994int set_endian(struct task_struct *tsk, unsigned int val)
1995{
1996 struct pt_regs *regs = tsk->thread.regs;
1997
1998 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1999 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
2000 return -EINVAL;
2001
2002 if (regs == NULL)
2003 return -EINVAL;
2004
2005 if (val == PR_ENDIAN_BIG)
2006 regs_set_return_msr(regs, regs->msr & ~MSR_LE);
2007 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
2008 regs_set_return_msr(regs, regs->msr | MSR_LE);
2009 else
2010 return -EINVAL;
2011
2012 return 0;
2013}
2014
2015int get_endian(struct task_struct *tsk, unsigned long adr)
2016{
2017 struct pt_regs *regs = tsk->thread.regs;
2018 unsigned int val;
2019
2020 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
2021 !cpu_has_feature(CPU_FTR_REAL_LE))
2022 return -EINVAL;
2023
2024 if (regs == NULL)
2025 return -EINVAL;
2026
2027 if (regs->msr & MSR_LE) {
2028 if (cpu_has_feature(CPU_FTR_REAL_LE))
2029 val = PR_ENDIAN_LITTLE;
2030 else
2031 val = PR_ENDIAN_PPC_LITTLE;
2032 } else
2033 val = PR_ENDIAN_BIG;
2034
2035 return put_user(val, (unsigned int __user *)adr);
2036}
2037
/* prctl(PR_SET_UNALIGN) back end: record the unaligned-access policy. */
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}
2043
/* prctl(PR_GET_UNALIGN) back end: report the unaligned-access policy. */
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
2048
2049static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
2050 unsigned long nbytes)
2051{
2052 unsigned long stack_page;
2053 unsigned long cpu = task_cpu(p);
2054
2055 stack_page = (unsigned long)hardirq_ctx[cpu];
2056 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2057 return 1;
2058
2059 stack_page = (unsigned long)softirq_ctx[cpu];
2060 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2061 return 1;
2062
2063 return 0;
2064}
2065
/*
 * Return 1 if @sp, with @nbytes of frame data, lies within one of the
 * per-CPU emergency stacks of @p's CPU: the general emergency stack
 * and, on Book3S-64, the NMI and machine-check emergency stacks.
 * Always 0 on non-PPC64 builds or before the paca pointers exist.
 */
static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
					unsigned long nbytes)
{
#ifdef CONFIG_PPC64
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/* Early boot: pacas (and their emergency stacks) not set up yet. */
	if (!paca_ptrs)
		return 0;

	/* emergency_sp points at the top; the stack occupies the page below. */
	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

# ifdef CONFIG_PPC_BOOK3S_64
	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
# endif
#endif

	return 0;
}
2093
2094
2095int validate_sp(unsigned long sp, struct task_struct *p,
2096 unsigned long nbytes)
2097{
2098 unsigned long stack_page = (unsigned long)task_stack_page(p);
2099
2100 if (sp < THREAD_SIZE)
2101 return 0;
2102
2103 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2104 return 1;
2105
2106 if (valid_irq_stack(sp, p, nbytes))
2107 return 1;
2108
2109 return valid_emergency_stack(sp, p, nbytes);
2110}
2111
2112EXPORT_SYMBOL(validate_sp);
2113
/*
 * Walk @p's kernel stack (at most 16 frames) and return the first
 * saved LR that is outside the scheduler — i.e. where the task is
 * waiting.  Returns 0 for the current/running task or on any stack
 * validation failure.  The caller must hold a stack reference
 * (see get_wchan() below).
 */
static unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || task_is_running(p))
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;	/* follow the back chain */
		/* Re-check liveness: the task may have started running. */
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    task_is_running(p))
			return 0;
		/* The very first frame's LR is deliberately not inspected. */
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
2139
/*
 * Return the address @p is sleeping at (its "wait channel"), or 0.
 * Pins the task's stack around the walk so it cannot be freed.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long wchan = 0;

	if (try_get_task_stack(p)) {
		wchan = __get_wchan(p);
		put_task_stack(p);
	}

	return wchan;
}
2153
2154static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2155
/*
 * Print a backtrace for @tsk (current when NULL), starting from
 * @stack or, if that is NULL, from the task's saved stack pointer.
 * Each frame prints SP and the saved LR target; embedded exception
 * frames are detected via the pt_regs marker and expanded with
 * __show_regs().
 */
void __no_sanitize_address show_stack(struct task_struct *tsk,
				      unsigned long *stack,
				      const char *loglvl)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
	unsigned long ret_addr;
	int ftrace_idx = 0;

	if (tsk == NULL)
		tsk = current;

	/* Pin the stack so it cannot be freed while we walk it. */
	if (!try_get_task_stack(tsk))
		return;

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_frame();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("%sCall Trace:\n", loglvl);
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			break;

		stack = (unsigned long *) sp;
		newsp = stack[0];	/* back chain to the next frame */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("%s["REG"] ["REG"] %pS",
			       loglvl, sp, ip, (void *)ip);
			/* undo any ftrace return-address rewriting */
			ret_addr = ftrace_graph_ret_addr(current,
						&ftrace_idx, ip, stack);
			if (ret_addr != ip)
				pr_cont(" (%pS)", (void *)ret_addr);
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame: look for the
		 * pt_regs marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);

			lr = regs->link;
			printk("%s--- interrupt: %lx at %pS\n",
			       loglvl, regs->trap, (void *)regs->nip);
			__show_regs(regs);
			printk("%s--- interrupt: %lx\n",
			       loglvl, regs->trap);

			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);

	put_task_stack(tsk);
}
2226
2227#ifdef CONFIG_PPC64
2228
/*
 * Set the processor RUN latch via the CTRL SPR and record that it is
 * on in the thread-info local flags.
 */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * ISA 2.06 and later: a plain write suffices, so the
		 * mfspr read-modify-write is skipped.  NOTE(review):
		 * presumably only the RUN bit is writable on these
		 * CPUs — confirm against the ISA.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Older CPUs: read-modify-write to preserve the other
		 * CTRL bits.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}
2254
2255
/*
 * Clear the processor RUN latch and the corresponding thread-info
 * flag.  Mirrors __ppc64_runlatch_on() above: a plain CTRL write on
 * ISA 2.06+, a read-modify-write on older CPUs.
 */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
2272#endif
2273
/*
 * Randomise the initial stack pointer within a page (unless the task
 * opted out of ASLR) and align it down to 16 bytes.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
2280
2281static inline unsigned long brk_rnd(void)
2282{
2283 unsigned long rnd = 0;
2284
2285
2286 if (is_32bit_task())
2287 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2288 else
2289 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2290
2291 return rnd << PAGE_SHIFT;
2292}
2293
/*
 * Choose a randomised program-break (heap start) for @mm.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * With 1T segments, lift the 64-bit task's heap base to at
	 * least the 1T boundary.  NOTE(review): presumably so the heap
	 * lands in its own segment — confirm rationale.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	/* Never move the break downwards (guards against wraparound). */
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
2319
2320