// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>
#include <asm/hw_breakpoint.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "Suspend disabled" mode? If so we have to block any
 * sigreturn that would get us into suspended state, and we also warn in
 * some other paths that we should never get here.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		regs_set_return_msr(&tsk->thread.ckpt_regs,
						tsk->thread.regs->msr);
		set_thread_flag(TIF_RESTORE_TM);
	}
}

#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

/* notrace because it's called by restore_math */
unsigned long notrace msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

/* notrace because it's called by restore_math */
void notrace __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
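/*
 * Save the FP registers to the thread_struct and clear the FP bits in the
 * user MSR, so the thread no longer owns the FP unit. MSR_VSX is cleared
 * too because VSX instructions overlap the FP register set.
 */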
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
#else
static inline void __giveup_fpu(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
	regs_set_return_msr(tsk->thread.regs, msr);
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

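/*
 * Bitmask of the facility MSR bits (FP/VEC/VSX/SPE) that exist on this
 * CPU, computed once at boot and used by giveup_all() and save_all() to
 * avoid testing CPU features on every context switch.
 */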
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_all_available |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;

	return 0;
}
early_initcall(init_msr_all_available);

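/*
 * Give all user register state (FP, VMX and SPE; VSX is covered by
 * FP + VMX) back to the thread_struct so the task can be switched out.
 */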
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	check_if_tm_restore_required(tsk);

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_FPU
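/*
 * thread.load_fp is non-zero while the thread wants its FP state loaded
 * eagerly on exception exit; bumping it here records another restore.
 */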
static bool should_restore_fp(void)
{
	if (current->thread.load_fp) {
		current->thread.load_fp++;
		return true;
	}
	return false;
}

static void do_restore_fp(void)
{
	load_fp_state(&current->thread.fp_state);
}
#else
static bool should_restore_fp(void) { return false; }
static void do_restore_fp(void) { }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
static bool should_restore_altivec(void)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
		current->thread.load_vec++;
		return true;
	}
	return false;
}

static void do_restore_altivec(void)
{
	load_vr_state(&current->thread.vr_state);
	current->thread.used_vr = 1;
}
#else
static bool should_restore_altivec(void) { return false; }
static void do_restore_altivec(void) { }
#endif /* CONFIG_ALTIVEC */

static bool should_restore_vsx(void)
{
	if (cpu_has_feature(CPU_FTR_VSX))
		return true;
	return false;
}
#ifdef CONFIG_VSX
static void do_restore_vsx(void)
{
	current->thread.used_vsr = 1;
}
#else
static void do_restore_vsx(void) { }
#endif /* CONFIG_VSX */

/*
 * The exception exit path calls restore_math() with interrupts hard disabled
 * but the soft irq state not "reconciled". ftrace code that calls
 * local_irq_save/restore causes warnings.
 *
 * Rather than complicate the exit path, just don't trace restore_math. This
 * could be done by having ftrace entry code check for this un-reconciled
 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
 * temporarily fix it up for the duration of the ftrace call.
 */
void notrace restore_math(struct pt_regs *regs)
{
	unsigned long msr;
	unsigned long new_msr = 0;

	msr = regs->msr;

	/*
	 * new_msr tracks the facilities that are to be restored. Only reload
	 * if the bit is not set in the user MSR (if it is set, the registers
	 * are live for the user thread).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
		new_msr |= MSR_FP;

	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;

	if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
		if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
			new_msr |= MSR_VSX;
	}

	if (new_msr) {
		unsigned long fpexc_mode = 0;

		msr_check_and_set(new_msr);

		if (new_msr & MSR_FP) {
			do_restore_fp();

			/* This also covers VSX, because VSX implies FP */
			fpexc_mode = current->thread.fpexc_mode;
		}

		if (new_msr & MSR_VEC)
			do_restore_altivec();

		if (new_msr & MSR_VSX)
			do_restore_vsx();

		msr_check_and_clear(new_msr);

		regs_set_return_msr(regs, regs->msr | new_msr | fpexc_mode);
	}
}
#endif /* CONFIG_PPC_BOOK3S_64 */

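/*
 * Save all facility state into the thread_struct without relinquishing
 * ownership: the registers stay live on the CPU, only the saved copy is
 * brought up to date. SPE is the exception and is given up entirely.
 */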
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt,
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */

static void do_break_handler(struct pt_regs *regs)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct arch_hw_breakpoint *info;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;
	int i;

	/*
	 * If underneath hw supports only one watchpoint, we know it
	 * caused exception. 8xx also falls into this category.
	 */
	if (nr_wp_slots() == 1) {
		__set_breakpoint(0, &null_brk);
		current->thread.hw_brk[0] = null_brk;
		current->thread.hw_brk[0].flags |= HW_BRK_FLAG_DISABLED;
		return;
	}

	/* Otherwise find out which DAWR caused exception and disable it. */
	wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		info = &current->thread.hw_brk[i];
		if (!info->address)
			continue;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			__set_breakpoint(i, &null_brk);
			current->thread.hw_brk[i] = null_brk;
			current->thread.hw_brk[i].flags |= HW_BRK_FLAG_DISABLED;
		}
	}
}

DEFINE_INTERRUPT_HANDLER(do_break)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, regs->dsisr,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/*
	 * We reach here only when watchpoint exception is generated by ptrace
	 * event (or hw is buggy!). If CONFIG_HAVE_HW_BREAKPOINT is set, the
	 * watchpoint is already handled by hw_breakpoint_handler() so we
	 * don't have to do anything. But when CONFIG_HAVE_HW_BREAKPOINT is
	 * not set, we need to handle the watchpoint manually here.
	 */
	if (!IS_ENABLED(CONFIG_HAVE_HW_BREAKPOINT))
		do_break_handler(regs);

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)regs->dar);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

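/* Per-CPU copy of the breakpoints currently programmed into the hardware */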
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be
	 * cleared.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}

/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(i, brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	int i;
	struct arch_hw_breakpoint null_brk = {0};

	for (i = 0; i < nr_wp_slots(); i++) {
		thread->hw_brk[i] = null_brk;
		if (ppc_breakpoint_available())
			set_breakpoint(i, &thread->hw_brk[i]);
	}
}

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;

	return true;
}

static void switch_hw_breakpoint(struct task_struct *new)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
					&new->thread.hw_brk[i])))
			continue;

		__set_breakpoint(i, &new->thread.hw_brk[i]);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

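/*
 * Program a DABR-style breakpoint, either via a platform hook (e.g. when
 * running under a hypervisor) or by writing the DABR/DAC registers
 * directly.
 */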
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	if (IS_ENABLED(CONFIG_PPC_ADV_DEBUG_REGS)) {
		mtspr(SPRN_DAC1, dabr);
		if (IS_ENABLED(CONFIG_PPC_47x))
			isync();
		return 0;
	} else if (IS_ENABLED(CONFIG_PPC_BOOK3S)) {
		mtspr(SPRN_DABR, dabr);
		if (cpu_has_feature(CPU_FTR_DABRX))
			mtspr(SPRN_DABRX, dabrx);
		return 0;
	} else {
		return -EINVAL;
	}
}

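/*
 * Program an 8xx hardware breakpoint via the LCTRL1/LCTRL2 compare
 * machinery: CMPE/CMPF hold the start/end of the watched range and
 * LCTRL1 selects the read/write conditions to match on.
 */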
static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
{
	unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
			       LCTRL1_CRWF_RW;
	unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
	unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
	unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);

	if (start_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_F;
	else if (end_addr == 0)
		lctrl2 |= LCTRL2_LW0LA_E;
	else
		lctrl2 |= LCTRL2_LW0LA_EandF;

	mtspr(SPRN_LCTRL2, 0);

	if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
		return 0;

	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
	if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;

	mtspr(SPRN_CMPE, start_addr - 1);
	mtspr(SPRN_CMPF, end_addr);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}

void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));

	if (dawr_enabled())
		// Power8 or later
		set_dawr(nr, brk);
	else if (IS_ENABLED(CONFIG_PPC_8xx))
		set_breakpoint_8xx(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (dawr_enabled())
		return true; /* POWER8 DAWR or POWER9 forced DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

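/*
 * Reclaim the transactional state of @thr: save the checkpointed register
 * state into ckpt_regs/ckfp_state/ckvr_state and take the CPU out of
 * transactional mode. @cause is recorded as the abort reason.
 */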
static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it's going to write the checkpointed state or
	 * not. So either this will write the checkpointed registers,
	 * or reclaim will. Either way the checkpointed state is the
	 * same.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no requirement to clear
	 * checkpoint state.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}

	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, ensure all the math functionality is turned off to trigger
	 * restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs_set_return_msr(regs, regs->msr | msr_diff);
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
void tm_reclaim_current(uint8_t cause) {}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

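/*
 * Save the SPRs (VRSAVE, SPEFSCR, DSCR, the EBB registers, FSCR, TAR)
 * that are context-switched here rather than by the low-level switch code.
 */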
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		t->spefscr = mfspr(SPRN_SPEFSCR);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void kvmppc_save_user_regs(void)
{
	unsigned long usermsr;

	if (!current->thread.regs)
		return;

	usermsr = current->thread.regs->msr;

	if (usermsr & MSR_FP)
		save_fpu(current);

	if (usermsr & MSR_VEC)
		save_altivec(current);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (usermsr & MSR_TM) {
		current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
		current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
		current->thread.tm_texasr = mfspr(SPRN_TEXASR);
		current->thread.regs->msr &= ~MSR_TM;
	}
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);

void kvmppc_save_current_sprs(void)
{
	save_sprs(&current->thread);
}
EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

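/*
 * Restore the SPRs saved by save_sprs(). Each mtspr is comparatively
 * expensive, so only write registers whose value actually differs between
 * the old and new threads.
 */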
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE) &&
	    old_thread->spefscr != new_thread->spefscr)
		mtspr(SPRN_SPEFSCR, new_thread->spefscr);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif
}

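/*
 * The main context-switch path: flush the outgoing task's register state
 * to its thread_struct, handle TM reclaim/recheckpoint and breakpoints,
 * then call the asm _switch() to swap stacks and non-volatile state.
 */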
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_64S_HASH_MMU
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC_64S_HASH_MMU
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}

	/*
	 * On POWER9 the copy-paste buffer can only paste into
	 * foreign real addresses, so unprivileged processes can not
	 * see the data or use it in any way unless they have
	 * foreign real mappings. If the new process has the foreign
	 * real address mappings, we must issue a cp_abort to clear
	 * any state and prevent snooping, corruption or a covert
	 * channel. ISA v3.1 supports paste into local memory.
	 */
	if (new->mm && (cpu_has_feature(CPU_FTR_ARCH_31) ||
			atomic_read(&new->mm->context.vas_windows)))
		asm volatile(PPC_CP_ABORT);
#endif

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * With CONFIG_HAVE_HW_BREAKPOINT the breakpoints are managed by the
 * hw_breakpoint (perf) layer, so there is nothing to do here; otherwise
 * switch them by hand.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	switch_hw_breakpoint(new);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() and set_return_regs_changed() before calling
	 * _switch(), because after _switch() we are no longer running on
	 * the old thread's CPU state.
	 */
	restore_sprs(old_thread, new_thread);

	set_return_regs_changed(); /* _switch changes stack (and regs) */

	if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
		kuap_assert_locked();

	last = _switch(old_thread, new_thread);

	/*
	 * Nothing after _switch will be run for newly created tasks,
	 * because they switch directly to ret_from_fork/ret_from_kernel_thread
	 * etc. Code added here should have a comment explaining why that is
	 * okay.
	 */

#ifdef CONFIG_PPC_BOOK3S_64
#ifdef CONFIG_PPC_64S_HASH_MMU
	/*
	 * This applies to a process that was context switched while inside
	 * arch_enter_lazy_mmu_mode(), to re-activate the batch that was
	 * deactivated above, before _switch(). This will never be the case
	 * for new tasks.
	 */
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif

	/*
	 * Math facilities are masked out of the child MSR in copy_thread.
	 * A new task does not need to restore_math because it will
	 * demand fault them.
	 */
	if (current->thread.regs)
		restore_math(current->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

#define NR_INSN_TO_PRINT	16

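/*
 * Dump the instructions around the faulting address: three quarters of
 * the window before NIP, one quarter at and after it, marking the
 * instruction at NIP.
 */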
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long nip = regs->nip;
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Instruction dump:");

	/*
	 * If we were executing with the MMU off for instructions, adjust pc
	 * rather than printing XXXXXXXX.
	 */
	if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
		pc = (unsigned long)phys_to_virt(pc);
		nip = (unsigned long)phys_to_virt(regs->nip);
	}

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

		if (!__kernel_text_address(pc) ||
		    get_kernel_nofault(instr, (const void *)pc)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];	/* holds one line of 8 instructions */

	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			if (copy_from_user_nofault(&instr, (void __user *)pc,
						   sizeof(instr))) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}

struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
	/*
	 * This only prints something if at least one of the TM bits is
	 * set. Inside the TM[] brackets, 'E' means the transaction is
	 * Enabled, 'S' Suspended and 'T' Transactional.
	 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#endif

static void __show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == INTERRUPT_MACHINE_CHECK ||
	    trap == INTERRUPT_DATA_STORAGE ||
	    trap == INTERRUPT_ALIGNMENT) {
		if (IS_ENABLED(CONFIG_4xx) || IS_ENABLED(CONFIG_BOOKE))
			pr_cont("DEAR: "REG" ESR: "REG" ", regs->dear, regs->esr);
		else
			pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
	}

#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
	}
	pr_cont("\n");

	/*
	 * Look up NIP late so we have the best chance of getting the
	 * above info out without failing.
	 */
	if (IS_ENABLED(CONFIG_KALLSYMS)) {
		printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
		printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
	__show_regs(regs);
	show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
	if (!user_mode(regs))
		show_instructions(regs);
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void arch_setup_new_exec(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!radix_enabled())
		hash__setup_new_exec();
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_MEM_KEYS
	current->thread.regs->amr = default_amr;
	current->thread.regs->iamr = default_iamr;
#endif
}

#ifdef CONFIG_PPC64
/*
 * Assign a TIDR (thread ID) for task @t and set it in the thread structure.
 * For now, we only support setting TIDR for 'current' task.
 *
 * Since the TID value is a truncated form of the PID, it is possible
 * (but unlikely) for two threads to share the same TID. Consumers of the
 * TIDR (such as accelerators that pair a thread-wait with an address
 * notification) must therefore tolerate a spurious or missed wakeup
 * caused by such a collision.
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);

#endif /* CONFIG_PPC64 */

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to()
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_64S_HASH_MMU
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_fork_scv(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int i;
#endif

	klp_init_thread_info(p);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(args->fn)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (args->fn)
			childregs->gpr[14] = ppc_function_entry((void *)args->fn);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = (unsigned long)args->fn_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		/* 64s sets this in ret_from_fork */
		if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
			childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
			if (!is_32bit_task())
				childregs->gpr[13] = tls;
			else
				childregs->gpr[2] = tls;
		}

		if (trap_is_scv(regs))
			f = ret_from_fork_scv;
		else
			f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the syscall or
	 * clone in the parent.  If it is a kernel thread it will simply
	 * run the thread function via ret_from_kernel_thread.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	for (i = 0; i < nr_wp_slots(); i++)
		p->thread.ptrace_bps[i] = NULL;
#endif

#ifdef CONFIG_PPC_FPU_REGS
	p->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
	p->thread.kuap = KUAP_NONE;
#endif
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	p->thread.pid = MMU_NO_CONTEXT;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;

	p->thread.tidr = 0;
#endif
	/*
	 * Run with the current AMR value of the kernel
	 */
#ifdef CONFIG_PPC_PKEY
	if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
		kregs->amr = AMR_KUAP_BLOCKED;

	if (mmu_has_feature(MMU_FTR_BOOK3S_KUEP))
		kregs->iamr = AMR_KUEP_BLOCKED;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}

void preload_new_slb_context(unsigned long start, unsigned long sp);

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
		preload_new_slb_context(start, sp);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs_set_return_ip(regs, entry);
		regs_set_return_msr(regs, MSR_USER64);
	} else {
		regs->gpr[2] = 0;
		regs_set_return_ip(regs, start);
		regs_set_return_msr(regs, MSR_USER32);
	}

#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_slb = 0;
	current->thread.load_fp = 0;
#ifdef CONFIG_PPC_FPU_REGS
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#endif
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * prctl with PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the new prctl settings) to keep the kernel's
			 * saved SPEFSCR in sync with the hardware.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
#endif
			return 0;
		} else {
			return -EINVAL;
		}
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0) {
		regs_set_return_msr(regs, (regs->msr & ~(MSR_FE0|MSR_FE1))
						| tsk->thread.fpexc_mode);
	}
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val = 0;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) {
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * As in set_fpexc_mode(): reading the mode under
			 * PR_FP_EXC_SW_ENABLE requires snapshotting SPEFSCR
			 * so that the kernel's saved copy stays in sync with
			 * the hardware register.
			 */
#ifdef CONFIG_SPE
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
#endif
		} else
			return -EINVAL;
	} else {
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	}
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs_set_return_msr(regs, regs->msr & ~MSR_LE);
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs_set_return_msr(regs, regs->msr | MSR_LE);
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

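/*
 * Check whether @sp points into one of @p's CPU's IRQ stacks (hard or
 * soft), with at least @nbytes of space remaining up to the stack top.
 */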
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	stack_page = (unsigned long)hardirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)softirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return 0;
}

static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
					unsigned long nbytes)
{
#ifdef CONFIG_PPC64
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	if (!paca_ptrs)
		return 0;

	stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

# ifdef CONFIG_PPC_BOOK3S_64
	stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;
# endif
#endif

	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp < THREAD_SIZE)
		return 0;

	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	if (valid_irq_stack(sp, p, nbytes))
		return 1;

	return valid_emergency_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

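/*
 * Walk up @p's kernel stack to find the first return address that is not
 * in the scheduler, i.e. the "wait channel" the task is blocked in.
 */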
static unsigned long ___get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    task_is_running(p))
			return 0;
		if (count > 0) {
			ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long ret;

	if (!try_get_task_stack(p))
		return 0;

	ret = ___get_wchan(p);

	put_task_stack(p);

	return ret;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void __no_sanitize_address show_stack(struct task_struct *tsk,
				      unsigned long *stack,
				      const char *loglvl)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
	unsigned long ret_addr;
	int ftrace_idx = 0;

	if (tsk == NULL)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_frame();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("%sCall Trace:\n", loglvl);
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			break;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("%s["REG"] ["REG"] %pS",
				loglvl, sp, ip, (void *)ip);
			ret_addr = ftrace_graph_ret_addr(current,
						&ftrace_idx, ip, stack);
			if (ret_addr != ip)
				pr_cont(" (%pS)", (void *)ret_addr);
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);

			lr = regs->link;
			printk("%s--- interrupt: %lx at %pS\n",
			       loglvl, regs->trap, (void *)regs->nip);
			__show_regs(regs);
			printk("%s--- interrupt: %lx\n",
			       loglvl, regs->trap);

			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);

	put_task_stack(tsk);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */

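/*
 * Randomise the initial user stack pointer (within a page) unless the
 * task has disabled address space randomisation, then 16-byte align it.
 */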
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}