1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/errno.h>
14#include <linux/sched.h>
15#include <linux/sched/debug.h>
16#include <linux/sched/task.h>
17#include <linux/sched/task_stack.h>
18#include <linux/kernel.h>
19#include <linux/mm.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/unistd.h>
23#include <linux/ptrace.h>
24#include <linux/slab.h>
25#include <linux/user.h>
26#include <linux/elf.h>
27#include <linux/prctl.h>
28#include <linux/init_task.h>
29#include <linux/export.h>
30#include <linux/kallsyms.h>
31#include <linux/mqueue.h>
32#include <linux/hardirq.h>
33#include <linux/utsname.h>
34#include <linux/ftrace.h>
35#include <linux/kernel_stat.h>
36#include <linux/personality.h>
37#include <linux/random.h>
38#include <linux/hw_breakpoint.h>
39#include <linux/uaccess.h>
40#include <linux/elf-randomize.h>
41#include <linux/pkeys.h>
42#include <linux/seq_buf.h>
43
44#include <asm/pgtable.h>
45#include <asm/io.h>
46#include <asm/processor.h>
47#include <asm/mmu.h>
48#include <asm/prom.h>
49#include <asm/machdep.h>
50#include <asm/time.h>
51#include <asm/runlatch.h>
52#include <asm/syscalls.h>
53#include <asm/switch_to.h>
54#include <asm/tm.h>
55#include <asm/debug.h>
56#ifdef CONFIG_PPC64
57#include <asm/firmware.h>
58#include <asm/hw_irq.h>
59#endif
60#include <asm/code-patching.h>
61#include <asm/exec.h>
62#include <asm/livepatch.h>
63#include <asm/cpu_has_feature.h>
64#include <asm/asm-prototypes.h>
65#include <asm/stacktrace.h>
66#include <asm/hw_breakpoint.h>
67
68#include <linux/kprobes.h>
69#include <linux/kdebug.h>
70
71
72#ifdef TM_DEBUG_SW
73#define TM_DEBUG(x...) printk(KERN_INFO x)
74#else
75#define TM_DEBUG(x...) do { } while(0)
76#endif
77
78extern unsigned long _get_SP(void);
79
80#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81
82
83
84
85
86bool tm_suspend_disabled __ro_after_init = false;
87
88static void check_if_tm_restore_required(struct task_struct *tsk)
89{
90
91
92
93
94
95
96 if (tsk == current && tsk->thread.regs &&
97 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
98 !test_thread_flag(TIF_RESTORE_TM)) {
99 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
100 set_thread_flag(TIF_RESTORE_TM);
101 }
102}
103
104#else
105static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
106#endif
107
108bool strict_msr_control;
109EXPORT_SYMBOL(strict_msr_control);
110
111static int __init enable_strict_msr_control(char *str)
112{
113 strict_msr_control = true;
114 pr_info("Enabling strict facility control\n");
115
116 return 0;
117}
118early_param("ppc_strict_facility_enable", enable_strict_msr_control);
119
120
121unsigned long notrace msr_check_and_set(unsigned long bits)
122{
123 unsigned long oldmsr = mfmsr();
124 unsigned long newmsr;
125
126 newmsr = oldmsr | bits;
127
128#ifdef CONFIG_VSX
129 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
130 newmsr |= MSR_VSX;
131#endif
132
133 if (oldmsr != newmsr)
134 mtmsr_isync(newmsr);
135
136 return newmsr;
137}
138EXPORT_SYMBOL_GPL(msr_check_and_set);
139
140
141void notrace __msr_check_and_clear(unsigned long bits)
142{
143 unsigned long oldmsr = mfmsr();
144 unsigned long newmsr;
145
146 newmsr = oldmsr & ~bits;
147
148#ifdef CONFIG_VSX
149 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
150 newmsr &= ~MSR_VSX;
151#endif
152
153 if (oldmsr != newmsr)
154 mtmsr_isync(newmsr);
155}
156EXPORT_SYMBOL(__msr_check_and_clear);
157
158#ifdef CONFIG_PPC_FPU
159static void __giveup_fpu(struct task_struct *tsk)
160{
161 unsigned long msr;
162
163 save_fpu(tsk);
164 msr = tsk->thread.regs->msr;
165 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
166#ifdef CONFIG_VSX
167 if (cpu_has_feature(CPU_FTR_VSX))
168 msr &= ~MSR_VSX;
169#endif
170 tsk->thread.regs->msr = msr;
171}
172
173void giveup_fpu(struct task_struct *tsk)
174{
175 check_if_tm_restore_required(tsk);
176
177 msr_check_and_set(MSR_FP);
178 __giveup_fpu(tsk);
179 msr_check_and_clear(MSR_FP);
180}
181EXPORT_SYMBOL(giveup_fpu);
182
183
184
185
186
187void flush_fp_to_thread(struct task_struct *tsk)
188{
189 if (tsk->thread.regs) {
190
191
192
193
194
195
196
197
198 preempt_disable();
199 if (tsk->thread.regs->msr & MSR_FP) {
200
201
202
203
204
205
206
207 BUG_ON(tsk != current);
208 giveup_fpu(tsk);
209 }
210 preempt_enable();
211 }
212}
213EXPORT_SYMBOL_GPL(flush_fp_to_thread);
214
215void enable_kernel_fp(void)
216{
217 unsigned long cpumsr;
218
219 WARN_ON(preemptible());
220
221 cpumsr = msr_check_and_set(MSR_FP);
222
223 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
224 check_if_tm_restore_required(current);
225
226
227
228
229
230
231
232 if (!MSR_TM_ACTIVE(cpumsr) &&
233 MSR_TM_ACTIVE(current->thread.regs->msr))
234 return;
235 __giveup_fpu(current);
236 }
237}
238EXPORT_SYMBOL(enable_kernel_fp);
239
240static int restore_fp(struct task_struct *tsk)
241{
242 if (tsk->thread.load_fp) {
243 load_fp_state(¤t->thread.fp_state);
244 current->thread.load_fp++;
245 return 1;
246 }
247 return 0;
248}
249#else
250static int restore_fp(struct task_struct *tsk) { return 0; }
251#endif
252
253#ifdef CONFIG_ALTIVEC
254#define loadvec(thr) ((thr).load_vec)
255
256static void __giveup_altivec(struct task_struct *tsk)
257{
258 unsigned long msr;
259
260 save_altivec(tsk);
261 msr = tsk->thread.regs->msr;
262 msr &= ~MSR_VEC;
263#ifdef CONFIG_VSX
264 if (cpu_has_feature(CPU_FTR_VSX))
265 msr &= ~MSR_VSX;
266#endif
267 tsk->thread.regs->msr = msr;
268}
269
270void giveup_altivec(struct task_struct *tsk)
271{
272 check_if_tm_restore_required(tsk);
273
274 msr_check_and_set(MSR_VEC);
275 __giveup_altivec(tsk);
276 msr_check_and_clear(MSR_VEC);
277}
278EXPORT_SYMBOL(giveup_altivec);
279
280void enable_kernel_altivec(void)
281{
282 unsigned long cpumsr;
283
284 WARN_ON(preemptible());
285
286 cpumsr = msr_check_and_set(MSR_VEC);
287
288 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
289 check_if_tm_restore_required(current);
290
291
292
293
294
295
296
297 if (!MSR_TM_ACTIVE(cpumsr) &&
298 MSR_TM_ACTIVE(current->thread.regs->msr))
299 return;
300 __giveup_altivec(current);
301 }
302}
303EXPORT_SYMBOL(enable_kernel_altivec);
304
305
306
307
308
309void flush_altivec_to_thread(struct task_struct *tsk)
310{
311 if (tsk->thread.regs) {
312 preempt_disable();
313 if (tsk->thread.regs->msr & MSR_VEC) {
314 BUG_ON(tsk != current);
315 giveup_altivec(tsk);
316 }
317 preempt_enable();
318 }
319}
320EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
321
322static int restore_altivec(struct task_struct *tsk)
323{
324 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
325 load_vr_state(&tsk->thread.vr_state);
326 tsk->thread.used_vr = 1;
327 tsk->thread.load_vec++;
328
329 return 1;
330 }
331 return 0;
332}
333#else
334#define loadvec(thr) 0
335static inline int restore_altivec(struct task_struct *tsk) { return 0; }
336#endif
337
338#ifdef CONFIG_VSX
339static void __giveup_vsx(struct task_struct *tsk)
340{
341 unsigned long msr = tsk->thread.regs->msr;
342
343
344
345
346
347 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
348
349
350 if (msr & MSR_FP)
351 __giveup_fpu(tsk);
352 if (msr & MSR_VEC)
353 __giveup_altivec(tsk);
354}
355
356static void giveup_vsx(struct task_struct *tsk)
357{
358 check_if_tm_restore_required(tsk);
359
360 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
361 __giveup_vsx(tsk);
362 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
363}
364
365void enable_kernel_vsx(void)
366{
367 unsigned long cpumsr;
368
369 WARN_ON(preemptible());
370
371 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
372
373 if (current->thread.regs &&
374 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
375 check_if_tm_restore_required(current);
376
377
378
379
380
381
382
383 if (!MSR_TM_ACTIVE(cpumsr) &&
384 MSR_TM_ACTIVE(current->thread.regs->msr))
385 return;
386 __giveup_vsx(current);
387 }
388}
389EXPORT_SYMBOL(enable_kernel_vsx);
390
391void flush_vsx_to_thread(struct task_struct *tsk)
392{
393 if (tsk->thread.regs) {
394 preempt_disable();
395 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
396 BUG_ON(tsk != current);
397 giveup_vsx(tsk);
398 }
399 preempt_enable();
400 }
401}
402EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
403
404static int restore_vsx(struct task_struct *tsk)
405{
406 if (cpu_has_feature(CPU_FTR_VSX)) {
407 tsk->thread.used_vsr = 1;
408 return 1;
409 }
410
411 return 0;
412}
413#else
414static inline int restore_vsx(struct task_struct *tsk) { return 0; }
415#endif
416
417#ifdef CONFIG_SPE
418void giveup_spe(struct task_struct *tsk)
419{
420 check_if_tm_restore_required(tsk);
421
422 msr_check_and_set(MSR_SPE);
423 __giveup_spe(tsk);
424 msr_check_and_clear(MSR_SPE);
425}
426EXPORT_SYMBOL(giveup_spe);
427
428void enable_kernel_spe(void)
429{
430 WARN_ON(preemptible());
431
432 msr_check_and_set(MSR_SPE);
433
434 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
435 check_if_tm_restore_required(current);
436 __giveup_spe(current);
437 }
438}
439EXPORT_SYMBOL(enable_kernel_spe);
440
441void flush_spe_to_thread(struct task_struct *tsk)
442{
443 if (tsk->thread.regs) {
444 preempt_disable();
445 if (tsk->thread.regs->msr & MSR_SPE) {
446 BUG_ON(tsk != current);
447 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
448 giveup_spe(tsk);
449 }
450 preempt_enable();
451 }
452}
453#endif
454
455static unsigned long msr_all_available;
456
457static int __init init_msr_all_available(void)
458{
459#ifdef CONFIG_PPC_FPU
460 msr_all_available |= MSR_FP;
461#endif
462#ifdef CONFIG_ALTIVEC
463 if (cpu_has_feature(CPU_FTR_ALTIVEC))
464 msr_all_available |= MSR_VEC;
465#endif
466#ifdef CONFIG_VSX
467 if (cpu_has_feature(CPU_FTR_VSX))
468 msr_all_available |= MSR_VSX;
469#endif
470#ifdef CONFIG_SPE
471 if (cpu_has_feature(CPU_FTR_SPE))
472 msr_all_available |= MSR_SPE;
473#endif
474
475 return 0;
476}
477early_initcall(init_msr_all_available);
478
479void giveup_all(struct task_struct *tsk)
480{
481 unsigned long usermsr;
482
483 if (!tsk->thread.regs)
484 return;
485
486 check_if_tm_restore_required(tsk);
487
488 usermsr = tsk->thread.regs->msr;
489
490 if ((usermsr & msr_all_available) == 0)
491 return;
492
493 msr_check_and_set(msr_all_available);
494
495 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
496
497#ifdef CONFIG_PPC_FPU
498 if (usermsr & MSR_FP)
499 __giveup_fpu(tsk);
500#endif
501#ifdef CONFIG_ALTIVEC
502 if (usermsr & MSR_VEC)
503 __giveup_altivec(tsk);
504#endif
505#ifdef CONFIG_SPE
506 if (usermsr & MSR_SPE)
507 __giveup_spe(tsk);
508#endif
509
510 msr_check_and_clear(msr_all_available);
511}
512EXPORT_SYMBOL(giveup_all);
513
514
515
516
517
518
519
520
521
522
523
524void notrace restore_math(struct pt_regs *regs)
525{
526 unsigned long msr;
527
528 if (!MSR_TM_ACTIVE(regs->msr) &&
529 !current->thread.load_fp && !loadvec(current->thread))
530 return;
531
532 msr = regs->msr;
533 msr_check_and_set(msr_all_available);
534
535
536
537
538
539 if ((!(msr & MSR_FP)) && restore_fp(current))
540 msr |= MSR_FP | current->thread.fpexc_mode;
541
542 if ((!(msr & MSR_VEC)) && restore_altivec(current))
543 msr |= MSR_VEC;
544
545 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
546 restore_vsx(current)) {
547 msr |= MSR_VSX;
548 }
549
550 msr_check_and_clear(msr_all_available);
551
552 regs->msr = msr;
553}
554
555static void save_all(struct task_struct *tsk)
556{
557 unsigned long usermsr;
558
559 if (!tsk->thread.regs)
560 return;
561
562 usermsr = tsk->thread.regs->msr;
563
564 if ((usermsr & msr_all_available) == 0)
565 return;
566
567 msr_check_and_set(msr_all_available);
568
569 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
570
571 if (usermsr & MSR_FP)
572 save_fpu(tsk);
573
574 if (usermsr & MSR_VEC)
575 save_altivec(tsk);
576
577 if (usermsr & MSR_SPE)
578 __giveup_spe(tsk);
579
580 msr_check_and_clear(msr_all_available);
581 thread_pkey_regs_save(&tsk->thread);
582}
583
584void flush_all_to_thread(struct task_struct *tsk)
585{
586 if (tsk->thread.regs) {
587 preempt_disable();
588 BUG_ON(tsk != current);
589#ifdef CONFIG_SPE
590 if (tsk->thread.regs->msr & MSR_SPE)
591 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
592#endif
593 save_all(tsk);
594
595 preempt_enable();
596 }
597}
598EXPORT_SYMBOL(flush_all_to_thread);
599
600#ifdef CONFIG_PPC_ADV_DEBUG_REGS
601void do_send_trap(struct pt_regs *regs, unsigned long address,
602 unsigned long error_code, int breakpt)
603{
604 current->thread.trap_nr = TRAP_HWBKPT;
605 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
606 11, SIGSEGV) == NOTIFY_STOP)
607 return;
608
609
610 force_sig_ptrace_errno_trap(breakpt,
611 (void __user *)address);
612}
613#else
614void do_break (struct pt_regs *regs, unsigned long address,
615 unsigned long error_code)
616{
617 current->thread.trap_nr = TRAP_HWBKPT;
618 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
619 11, SIGSEGV) == NOTIFY_STOP)
620 return;
621
622 if (debugger_break_match(regs))
623 return;
624
625
626 hw_breakpoint_disable();
627
628
629 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
630}
631#endif
632
633static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
634
635#ifdef CONFIG_PPC_ADV_DEBUG_REGS
636
637
638
639static void set_debug_reg_defaults(struct thread_struct *thread)
640{
641 thread->debug.iac1 = thread->debug.iac2 = 0;
642#if CONFIG_PPC_ADV_DEBUG_IACS > 2
643 thread->debug.iac3 = thread->debug.iac4 = 0;
644#endif
645 thread->debug.dac1 = thread->debug.dac2 = 0;
646#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
647 thread->debug.dvc1 = thread->debug.dvc2 = 0;
648#endif
649 thread->debug.dbcr0 = 0;
650#ifdef CONFIG_BOOKE
651
652
653
654 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
655 DBCR1_IAC3US | DBCR1_IAC4US;
656
657
658
659
660 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
661#else
662 thread->debug.dbcr1 = 0;
663#endif
664}
665
666static void prime_debug_regs(struct debug_reg *debug)
667{
668
669
670
671
672
673 mtmsr(mfmsr() & ~MSR_DE);
674
675 mtspr(SPRN_IAC1, debug->iac1);
676 mtspr(SPRN_IAC2, debug->iac2);
677#if CONFIG_PPC_ADV_DEBUG_IACS > 2
678 mtspr(SPRN_IAC3, debug->iac3);
679 mtspr(SPRN_IAC4, debug->iac4);
680#endif
681 mtspr(SPRN_DAC1, debug->dac1);
682 mtspr(SPRN_DAC2, debug->dac2);
683#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
684 mtspr(SPRN_DVC1, debug->dvc1);
685 mtspr(SPRN_DVC2, debug->dvc2);
686#endif
687 mtspr(SPRN_DBCR0, debug->dbcr0);
688 mtspr(SPRN_DBCR1, debug->dbcr1);
689#ifdef CONFIG_BOOKE
690 mtspr(SPRN_DBCR2, debug->dbcr2);
691#endif
692}
693
694
695
696
697
698void switch_booke_debug_regs(struct debug_reg *new_debug)
699{
700 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
701 || (new_debug->dbcr0 & DBCR0_IDM))
702 prime_debug_regs(new_debug);
703}
704EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
705#else
706#ifndef CONFIG_HAVE_HW_BREAKPOINT
707static void set_breakpoint(struct arch_hw_breakpoint *brk)
708{
709 preempt_disable();
710 __set_breakpoint(brk);
711 preempt_enable();
712}
713
714static void set_debug_reg_defaults(struct thread_struct *thread)
715{
716 thread->hw_brk.address = 0;
717 thread->hw_brk.type = 0;
718 if (ppc_breakpoint_available())
719 set_breakpoint(&thread->hw_brk);
720}
721#endif
722#endif
723
724#ifdef CONFIG_PPC_ADV_DEBUG_REGS
725static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
726{
727 mtspr(SPRN_DAC1, dabr);
728#ifdef CONFIG_PPC_47x
729 isync();
730#endif
731 return 0;
732}
733#elif defined(CONFIG_PPC_BOOK3S)
734static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
735{
736 mtspr(SPRN_DABR, dabr);
737 if (cpu_has_feature(CPU_FTR_DABRX))
738 mtspr(SPRN_DABRX, dabrx);
739 return 0;
740}
741#elif defined(CONFIG_PPC_8xx)
742static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
743{
744 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
745 unsigned long lctrl1 = 0x90000000;
746 unsigned long lctrl2 = 0x8e000002;
747
748 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
749 lctrl1 |= 0xa0000;
750 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
751 lctrl1 |= 0xf0000;
752 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
753 lctrl2 = 0;
754
755 mtspr(SPRN_LCTRL2, 0);
756 mtspr(SPRN_CMPE, addr);
757 mtspr(SPRN_CMPF, addr + 4);
758 mtspr(SPRN_LCTRL1, lctrl1);
759 mtspr(SPRN_LCTRL2, lctrl2);
760
761 return 0;
762}
763#else
764static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
765{
766 return -EINVAL;
767}
768#endif
769
770static inline int set_dabr(struct arch_hw_breakpoint *brk)
771{
772 unsigned long dabr, dabrx;
773
774 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
775 dabrx = ((brk->type >> 3) & 0x7);
776
777 if (ppc_md.set_dabr)
778 return ppc_md.set_dabr(dabr, dabrx);
779
780 return __set_dabr(dabr, dabrx);
781}
782
783void __set_breakpoint(struct arch_hw_breakpoint *brk)
784{
785 memcpy(this_cpu_ptr(¤t_brk), brk, sizeof(*brk));
786
787 if (dawr_enabled())
788
789 set_dawr(brk);
790 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
791
792 set_dabr(brk);
793 else
794
795 WARN_ON_ONCE(1);
796}
797
798
799bool ppc_breakpoint_available(void)
800{
801 if (dawr_enabled())
802 return true;
803 if (cpu_has_feature(CPU_FTR_ARCH_207S))
804 return false;
805
806 return true;
807}
808EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
809
810static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
811 struct arch_hw_breakpoint *b)
812{
813 if (a->address != b->address)
814 return false;
815 if (a->type != b->type)
816 return false;
817 if (a->len != b->len)
818 return false;
819 return true;
820}
821
822#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
823
824static inline bool tm_enabled(struct task_struct *tsk)
825{
826 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
827}
828
829static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
830{
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846 if (!MSR_TM_SUSPENDED(mfmsr()))
847 return;
848
849 giveup_all(container_of(thr, struct task_struct, thread));
850
851 tm_reclaim(thr, cause);
852
853
854
855
856
857
858
859
860
861
862
863
864
865 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
866 memcpy(&thr->ckfp_state, &thr->fp_state,
867 sizeof(struct thread_fp_state));
868 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
869 memcpy(&thr->ckvr_state, &thr->vr_state,
870 sizeof(struct thread_vr_state));
871}
872
873void tm_reclaim_current(uint8_t cause)
874{
875 tm_enable();
876 tm_reclaim_thread(¤t->thread, cause);
877}
878
879static inline void tm_reclaim_task(struct task_struct *tsk)
880{
881
882
883
884
885
886
887
888
889
890
891 struct thread_struct *thr = &tsk->thread;
892
893 if (!thr->regs)
894 return;
895
896 if (!MSR_TM_ACTIVE(thr->regs->msr))
897 goto out_and_saveregs;
898
899 WARN_ON(tm_suspend_disabled);
900
901 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
902 "ccr=%lx, msr=%lx, trap=%lx)\n",
903 tsk->pid, thr->regs->nip,
904 thr->regs->ccr, thr->regs->msr,
905 thr->regs->trap);
906
907 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
908
909 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
910 tsk->pid);
911
912out_and_saveregs:
913
914
915
916
917
918 tm_save_sprs(thr);
919}
920
921extern void __tm_recheckpoint(struct thread_struct *thread);
922
923void tm_recheckpoint(struct thread_struct *thread)
924{
925 unsigned long flags;
926
927 if (!(thread->regs->msr & MSR_TM))
928 return;
929
930
931
932
933
934 local_irq_save(flags);
935 hard_irq_disable();
936
937
938
939
940 tm_restore_sprs(thread);
941
942 __tm_recheckpoint(thread);
943
944 local_irq_restore(flags);
945}
946
947static inline void tm_recheckpoint_new_task(struct task_struct *new)
948{
949 if (!cpu_has_feature(CPU_FTR_TM))
950 return;
951
952
953
954
955
956
957
958
959
960 if (!tm_enabled(new))
961 return;
962
963 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
964 tm_restore_sprs(&new->thread);
965 return;
966 }
967
968 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
969 new->pid, new->thread.regs->msr);
970
971 tm_recheckpoint(&new->thread);
972
973
974
975
976
977
978 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
979
980 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
981 "(kernel msr 0x%lx)\n",
982 new->pid, mfmsr());
983}
984
985static inline void __switch_to_tm(struct task_struct *prev,
986 struct task_struct *new)
987{
988 if (cpu_has_feature(CPU_FTR_TM)) {
989 if (tm_enabled(prev) || tm_enabled(new))
990 tm_enable();
991
992 if (tm_enabled(prev)) {
993 prev->thread.load_tm++;
994 tm_reclaim_task(prev);
995 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
996 prev->thread.regs->msr &= ~MSR_TM;
997 }
998
999 tm_recheckpoint_new_task(new);
1000 }
1001}
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017void restore_tm_state(struct pt_regs *regs)
1018{
1019 unsigned long msr_diff;
1020
1021
1022
1023
1024
1025
1026
1027 clear_thread_flag(TIF_RESTORE_TM);
1028 if (!MSR_TM_ACTIVE(regs->msr))
1029 return;
1030
1031 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1032 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1033
1034
1035 if (msr_diff & MSR_FP)
1036 current->thread.load_fp = 1;
1037#ifdef CONFIG_ALTIVEC
1038 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1039 current->thread.load_vec = 1;
1040#endif
1041 restore_math(regs);
1042
1043 regs->msr |= msr_diff;
1044}
1045
1046#else
1047#define tm_recheckpoint_new_task(new)
1048#define __switch_to_tm(prev, new)
1049#endif
1050
1051static inline void save_sprs(struct thread_struct *t)
1052{
1053#ifdef CONFIG_ALTIVEC
1054 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1055 t->vrsave = mfspr(SPRN_VRSAVE);
1056#endif
1057#ifdef CONFIG_PPC_BOOK3S_64
1058 if (cpu_has_feature(CPU_FTR_DSCR))
1059 t->dscr = mfspr(SPRN_DSCR);
1060
1061 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1062 t->bescr = mfspr(SPRN_BESCR);
1063 t->ebbhr = mfspr(SPRN_EBBHR);
1064 t->ebbrr = mfspr(SPRN_EBBRR);
1065
1066 t->fscr = mfspr(SPRN_FSCR);
1067
1068
1069
1070
1071
1072
1073
1074 t->tar = mfspr(SPRN_TAR);
1075 }
1076#endif
1077
1078 thread_pkey_regs_save(t);
1079}
1080
1081static inline void restore_sprs(struct thread_struct *old_thread,
1082 struct thread_struct *new_thread)
1083{
1084#ifdef CONFIG_ALTIVEC
1085 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1086 old_thread->vrsave != new_thread->vrsave)
1087 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1088#endif
1089#ifdef CONFIG_PPC_BOOK3S_64
1090 if (cpu_has_feature(CPU_FTR_DSCR)) {
1091 u64 dscr = get_paca()->dscr_default;
1092 if (new_thread->dscr_inherit)
1093 dscr = new_thread->dscr;
1094
1095 if (old_thread->dscr != dscr)
1096 mtspr(SPRN_DSCR, dscr);
1097 }
1098
1099 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1100 if (old_thread->bescr != new_thread->bescr)
1101 mtspr(SPRN_BESCR, new_thread->bescr);
1102 if (old_thread->ebbhr != new_thread->ebbhr)
1103 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1104 if (old_thread->ebbrr != new_thread->ebbrr)
1105 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1106
1107 if (old_thread->fscr != new_thread->fscr)
1108 mtspr(SPRN_FSCR, new_thread->fscr);
1109
1110 if (old_thread->tar != new_thread->tar)
1111 mtspr(SPRN_TAR, new_thread->tar);
1112 }
1113
1114 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1115 old_thread->tidr != new_thread->tidr)
1116 mtspr(SPRN_TIDR, new_thread->tidr);
1117#endif
1118
1119 thread_pkey_regs_restore(new_thread, old_thread);
1120}
1121
1122struct task_struct *__switch_to(struct task_struct *prev,
1123 struct task_struct *new)
1124{
1125 struct thread_struct *new_thread, *old_thread;
1126 struct task_struct *last;
1127#ifdef CONFIG_PPC_BOOK3S_64
1128 struct ppc64_tlb_batch *batch;
1129#endif
1130
1131 new_thread = &new->thread;
1132 old_thread = ¤t->thread;
1133
1134 WARN_ON(!irqs_disabled());
1135
1136#ifdef CONFIG_PPC_BOOK3S_64
1137 batch = this_cpu_ptr(&ppc64_tlb_batch);
1138 if (batch->active) {
1139 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1140 if (batch->index)
1141 __flush_tlb_pending(batch);
1142 batch->active = 0;
1143 }
1144#endif
1145
1146#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1147 switch_booke_debug_regs(&new->thread.debug);
1148#else
1149
1150
1151
1152
1153#ifndef CONFIG_HAVE_HW_BREAKPOINT
1154 if (unlikely(!hw_brk_match(this_cpu_ptr(¤t_brk), &new->thread.hw_brk)))
1155 __set_breakpoint(&new->thread.hw_brk);
1156#endif
1157#endif
1158
1159
1160
1161
1162
1163 save_sprs(&prev->thread);
1164
1165
1166 giveup_all(prev);
1167
1168 __switch_to_tm(prev, new);
1169
1170 if (!radix_enabled()) {
1171
1172
1173
1174
1175
1176 hard_irq_disable();
1177 }
1178
1179
1180
1181
1182
1183
1184
1185
1186 restore_sprs(old_thread, new_thread);
1187
1188 last = _switch(old_thread, new_thread);
1189
1190#ifdef CONFIG_PPC_BOOK3S_64
1191 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1192 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1193 batch = this_cpu_ptr(&ppc64_tlb_batch);
1194 batch->active = 1;
1195 }
1196
1197 if (current->thread.regs) {
1198 restore_math(current->thread.regs);
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 if (current->thread.used_vas)
1209 asm volatile(PPC_CP_ABORT);
1210 }
1211#endif
1212
1213 return last;
1214}
1215
1216#define NR_INSN_TO_PRINT 16
1217
1218static void show_instructions(struct pt_regs *regs)
1219{
1220 int i;
1221 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1222
1223 printk("Instruction dump:");
1224
1225 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1226 int instr;
1227
1228 if (!(i % 8))
1229 pr_cont("\n");
1230
1231#if !defined(CONFIG_BOOKE)
1232
1233
1234
1235 if (!(regs->msr & MSR_IR))
1236 pc = (unsigned long)phys_to_virt(pc);
1237#endif
1238
1239 if (!__kernel_text_address(pc) ||
1240 probe_kernel_address((const void *)pc, instr)) {
1241 pr_cont("XXXXXXXX ");
1242 } else {
1243 if (regs->nip == pc)
1244 pr_cont("<%08x> ", instr);
1245 else
1246 pr_cont("%08x ", instr);
1247 }
1248
1249 pc += sizeof(int);
1250 }
1251
1252 pr_cont("\n");
1253}
1254
1255void show_user_instructions(struct pt_regs *regs)
1256{
1257 unsigned long pc;
1258 int n = NR_INSN_TO_PRINT;
1259 struct seq_buf s;
1260 char buf[96];
1261
1262 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1263
1264
1265
1266
1267
1268 if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
1269 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
1270 current->comm, current->pid);
1271 return;
1272 }
1273
1274 seq_buf_init(&s, buf, sizeof(buf));
1275
1276 while (n) {
1277 int i;
1278
1279 seq_buf_clear(&s);
1280
1281 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1282 int instr;
1283
1284 if (probe_kernel_address((const void *)pc, instr)) {
1285 seq_buf_printf(&s, "XXXXXXXX ");
1286 continue;
1287 }
1288 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1289 }
1290
1291 if (!seq_buf_has_overflowed(&s))
1292 pr_info("%s[%d]: code: %s\n", current->comm,
1293 current->pid, s.buffer);
1294 }
1295}
1296
1297struct regbit {
1298 unsigned long bit;
1299 const char *name;
1300};
1301
1302static struct regbit msr_bits[] = {
1303#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1304 {MSR_SF, "SF"},
1305 {MSR_HV, "HV"},
1306#endif
1307 {MSR_VEC, "VEC"},
1308 {MSR_VSX, "VSX"},
1309#ifdef CONFIG_BOOKE
1310 {MSR_CE, "CE"},
1311#endif
1312 {MSR_EE, "EE"},
1313 {MSR_PR, "PR"},
1314 {MSR_FP, "FP"},
1315 {MSR_ME, "ME"},
1316#ifdef CONFIG_BOOKE
1317 {MSR_DE, "DE"},
1318#else
1319 {MSR_SE, "SE"},
1320 {MSR_BE, "BE"},
1321#endif
1322 {MSR_IR, "IR"},
1323 {MSR_DR, "DR"},
1324 {MSR_PMM, "PMM"},
1325#ifndef CONFIG_BOOKE
1326 {MSR_RI, "RI"},
1327 {MSR_LE, "LE"},
1328#endif
1329 {0, NULL}
1330};
1331
1332static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1333{
1334 const char *s = "";
1335
1336 for (; bits->bit; ++bits)
1337 if (val & bits->bit) {
1338 pr_cont("%s%s", s, bits->name);
1339 s = sep;
1340 }
1341}
1342
1343#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1344static struct regbit msr_tm_bits[] = {
1345 {MSR_TS_T, "T"},
1346 {MSR_TS_S, "S"},
1347 {MSR_TM, "E"},
1348 {0, NULL}
1349};
1350
1351static void print_tm_bits(unsigned long val)
1352{
1353
1354
1355
1356
1357
1358
1359
1360 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1361 pr_cont(",TM[");
1362 print_bits(val, msr_tm_bits, "");
1363 pr_cont("]");
1364 }
1365}
1366#else
1367static void print_tm_bits(unsigned long val) {}
1368#endif
1369
1370static void print_msr_bits(unsigned long val)
1371{
1372 pr_cont("<");
1373 print_bits(val, msr_bits, ",");
1374 print_tm_bits(val);
1375 pr_cont(">");
1376}
1377
1378#ifdef CONFIG_PPC64
1379#define REG "%016lx"
1380#define REGS_PER_LINE 4
1381#define LAST_VOLATILE 13
1382#else
1383#define REG "%08lx"
1384#define REGS_PER_LINE 8
1385#define LAST_VOLATILE 12
1386#endif
1387
1388void show_regs(struct pt_regs * regs)
1389{
1390 int i, trap;
1391
1392 show_regs_print_info(KERN_DEFAULT);
1393
1394 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1395 regs->nip, regs->link, regs->ctr);
1396 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1397 regs, regs->trap, print_tainted(), init_utsname()->release);
1398 printk("MSR: "REG" ", regs->msr);
1399 print_msr_bits(regs->msr);
1400 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1401 trap = TRAP(regs);
1402 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1403 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1404 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1405#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1406 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1407#else
1408 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1409#endif
1410#ifdef CONFIG_PPC64
1411 pr_cont("IRQMASK: %lx ", regs->softe);
1412#endif
1413#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1414 if (MSR_TM_ACTIVE(regs->msr))
1415 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1416#endif
1417
1418 for (i = 0; i < 32; i++) {
1419 if ((i % REGS_PER_LINE) == 0)
1420 pr_cont("\nGPR%02d: ", i);
1421 pr_cont(REG " ", regs->gpr[i]);
1422 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1423 break;
1424 }
1425 pr_cont("\n");
1426#ifdef CONFIG_KALLSYMS
1427
1428
1429
1430
1431 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1432 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1433#endif
1434 show_stack(current, (unsigned long *) regs->gpr[1]);
1435 if (!user_mode(regs))
1436 show_instructions(regs);
1437}
1438
1439void flush_thread(void)
1440{
1441#ifdef CONFIG_HAVE_HW_BREAKPOINT
1442 flush_ptrace_hw_breakpoint(current);
1443#else
1444 set_debug_reg_defaults(¤t->thread);
1445#endif
1446}
1447
1448#ifdef CONFIG_PPC_BOOK3S_64
1449void arch_setup_new_exec(void)
1450{
1451 if (radix_enabled())
1452 return;
1453 hash__setup_new_exec();
1454}
1455#endif
1456
1457int set_thread_uses_vas(void)
1458{
1459#ifdef CONFIG_PPC_BOOK3S_64
1460 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1461 return -EINVAL;
1462
1463 current->thread.used_vas = 1;
1464
1465
1466
1467
1468
1469
1470
1471
1472 asm volatile(PPC_CP_ABORT);
1473
1474#endif
1475 return 0;
1476}
1477
1478#ifdef CONFIG_PPC64
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512int set_thread_tidr(struct task_struct *t)
1513{
1514 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1515 return -EINVAL;
1516
1517 if (t != current)
1518 return -EINVAL;
1519
1520 if (t->thread.tidr)
1521 return 0;
1522
1523 t->thread.tidr = (u16)task_pid_nr(t);
1524 mtspr(SPRN_TIDR, t->thread.tidr);
1525
1526 return 0;
1527}
1528EXPORT_SYMBOL_GPL(set_thread_tidr);
1529
1530#endif
1531
1532void
1533release_thread(struct task_struct *t)
1534{
1535}
1536
1537
1538
1539
1540
1541int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1542{
1543 flush_all_to_thread(src);
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554 __switch_to_tm(src, src);
1555
1556 *dst = *src;
1557
1558 clear_task_ebb(dst);
1559
1560 return 0;
1561}
1562
1563static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1564{
1565#ifdef CONFIG_PPC_BOOK3S_64
1566 unsigned long sp_vsid;
1567 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1568
1569 if (radix_enabled())
1570 return;
1571
1572 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1573 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1574 << SLB_VSID_SHIFT_1T;
1575 else
1576 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1577 << SLB_VSID_SHIFT;
1578 sp_vsid |= SLB_VSID_KERNEL | llp;
1579 p->thread.ksp_vsid = sp_vsid;
1580#endif
1581}
1582
1583
1584
1585
1586
1587
1588
1589
1590int copy_thread(unsigned long clone_flags, unsigned long usp,
1591 unsigned long kthread_arg, struct task_struct *p)
1592{
1593 struct pt_regs *childregs, *kregs;
1594 extern void ret_from_fork(void);
1595 extern void ret_from_kernel_thread(void);
1596 void (*f)(void);
1597 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1598 struct thread_info *ti = task_thread_info(p);
1599
1600 klp_init_thread_info(p);
1601
1602
1603 sp -= sizeof(struct pt_regs);
1604 childregs = (struct pt_regs *) sp;
1605 if (unlikely(p->flags & PF_KTHREAD)) {
1606
1607 memset(childregs, 0, sizeof(struct pt_regs));
1608 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1609
1610 if (usp)
1611 childregs->gpr[14] = ppc_function_entry((void *)usp);
1612#ifdef CONFIG_PPC64
1613 clear_tsk_thread_flag(p, TIF_32BIT);
1614 childregs->softe = IRQS_ENABLED;
1615#endif
1616 childregs->gpr[15] = kthread_arg;
1617 p->thread.regs = NULL;
1618 ti->flags |= _TIF_RESTOREALL;
1619 f = ret_from_kernel_thread;
1620 } else {
1621
1622 struct pt_regs *regs = current_pt_regs();
1623 CHECK_FULL_REGS(regs);
1624 *childregs = *regs;
1625 if (usp)
1626 childregs->gpr[1] = usp;
1627 p->thread.regs = childregs;
1628 childregs->gpr[3] = 0;
1629 if (clone_flags & CLONE_SETTLS) {
1630#ifdef CONFIG_PPC64
1631 if (!is_32bit_task())
1632 childregs->gpr[13] = childregs->gpr[6];
1633 else
1634#endif
1635 childregs->gpr[2] = childregs->gpr[6];
1636 }
1637
1638 f = ret_from_fork;
1639 }
1640 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1641 sp -= STACK_FRAME_OVERHEAD;
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651 ((unsigned long *)sp)[0] = 0;
1652 sp -= sizeof(struct pt_regs);
1653 kregs = (struct pt_regs *) sp;
1654 sp -= STACK_FRAME_OVERHEAD;
1655 p->thread.ksp = sp;
1656#ifdef CONFIG_PPC32
1657 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1658#endif
1659#ifdef CONFIG_HAVE_HW_BREAKPOINT
1660 p->thread.ptrace_bps[0] = NULL;
1661#endif
1662
1663 p->thread.fp_save_area = NULL;
1664#ifdef CONFIG_ALTIVEC
1665 p->thread.vr_save_area = NULL;
1666#endif
1667
1668 setup_ksp_vsid(p, sp);
1669
1670#ifdef CONFIG_PPC64
1671 if (cpu_has_feature(CPU_FTR_DSCR)) {
1672 p->thread.dscr_inherit = current->thread.dscr_inherit;
1673 p->thread.dscr = mfspr(SPRN_DSCR);
1674 }
1675 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1676 childregs->ppr = DEFAULT_PPR;
1677
1678 p->thread.tidr = 0;
1679#endif
1680 kregs->nip = ppc_function_entry(f);
1681 return 0;
1682}
1683
1684void preload_new_slb_context(unsigned long start, unsigned long sp);
1685
1686
1687
1688
1689void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1690{
1691#ifdef CONFIG_PPC64
1692 unsigned long load_addr = regs->gpr[2];
1693
1694#ifdef CONFIG_PPC_BOOK3S_64
1695 if (!radix_enabled())
1696 preload_new_slb_context(start, sp);
1697#endif
1698#endif
1699
1700
1701
1702
1703
1704 if (!current->thread.regs) {
1705 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1706 current->thread.regs = regs - 1;
1707 }
1708
1709#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1710
1711
1712
1713
1714
1715 if (MSR_TM_SUSPENDED(mfmsr()))
1716 tm_reclaim_current(0);
1717#endif
1718
1719 memset(regs->gpr, 0, sizeof(regs->gpr));
1720 regs->ctr = 0;
1721 regs->link = 0;
1722 regs->xer = 0;
1723 regs->ccr = 0;
1724 regs->gpr[1] = sp;
1725
1726
1727
1728
1729
1730
1731 regs->trap &= ~1UL;
1732
1733#ifdef CONFIG_PPC32
1734 regs->mq = 0;
1735 regs->nip = start;
1736 regs->msr = MSR_USER;
1737#else
1738 if (!is_32bit_task()) {
1739 unsigned long entry;
1740
1741 if (is_elf2_task()) {
1742
1743 entry = start;
1744
1745
1746
1747
1748
1749
1750
1751
1752
1753 regs->gpr[12] = start;
1754
1755 set_thread_flag(TIF_RESTOREALL);
1756 } else {
1757 unsigned long toc;
1758
1759
1760
1761
1762
1763
1764
1765 __get_user(entry, (unsigned long __user *)start);
1766 __get_user(toc, (unsigned long __user *)start+1);
1767
1768
1769
1770
1771 if (load_addr != 0) {
1772 entry += load_addr;
1773 toc += load_addr;
1774 }
1775 regs->gpr[2] = toc;
1776 }
1777 regs->nip = entry;
1778 regs->msr = MSR_USER64;
1779 } else {
1780 regs->nip = start;
1781 regs->gpr[2] = 0;
1782 regs->msr = MSR_USER32;
1783 }
1784#endif
1785#ifdef CONFIG_VSX
1786 current->thread.used_vsr = 0;
1787#endif
1788 current->thread.load_slb = 0;
1789 current->thread.load_fp = 0;
1790 memset(¤t->thread.fp_state, 0, sizeof(current->thread.fp_state));
1791 current->thread.fp_save_area = NULL;
1792#ifdef CONFIG_ALTIVEC
1793 memset(¤t->thread.vr_state, 0, sizeof(current->thread.vr_state));
1794 current->thread.vr_state.vscr.u[3] = 0x00010000;
1795 current->thread.vr_save_area = NULL;
1796 current->thread.vrsave = 0;
1797 current->thread.used_vr = 0;
1798 current->thread.load_vec = 0;
1799#endif
1800#ifdef CONFIG_SPE
1801 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1802 current->thread.acc = 0;
1803 current->thread.spefscr = 0;
1804 current->thread.used_spe = 0;
1805#endif
1806#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1807 current->thread.tm_tfhar = 0;
1808 current->thread.tm_texasr = 0;
1809 current->thread.tm_tfiar = 0;
1810 current->thread.load_tm = 0;
1811#endif
1812
1813 thread_pkey_regs_init(¤t->thread);
1814}
1815EXPORT_SYMBOL(start_thread);
1816
1817#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1818 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1819
1820int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1821{
1822 struct pt_regs *regs = tsk->thread.regs;
1823
1824
1825
1826
1827
1828 if (val & PR_FP_EXC_SW_ENABLE) {
1829#ifdef CONFIG_SPE
1830 if (cpu_has_feature(CPU_FTR_SPE)) {
1831
1832
1833
1834
1835
1836
1837
1838
1839
1840
1841
1842
1843 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1844 tsk->thread.fpexc_mode = val &
1845 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1846 return 0;
1847 } else {
1848 return -EINVAL;
1849 }
1850#else
1851 return -EINVAL;
1852#endif
1853 }
1854
1855
1856
1857
1858
1859
1860 if (val > PR_FP_EXC_PRECISE)
1861 return -EINVAL;
1862 tsk->thread.fpexc_mode = __pack_fe01(val);
1863 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1864 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1865 | tsk->thread.fpexc_mode;
1866 return 0;
1867}
1868
1869int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1870{
1871 unsigned int val;
1872
1873 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1874#ifdef CONFIG_SPE
1875 if (cpu_has_feature(CPU_FTR_SPE)) {
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1889 val = tsk->thread.fpexc_mode;
1890 } else
1891 return -EINVAL;
1892#else
1893 return -EINVAL;
1894#endif
1895 else
1896 val = __unpack_fe01(tsk->thread.fpexc_mode);
1897 return put_user(val, (unsigned int __user *) adr);
1898}
1899
1900int set_endian(struct task_struct *tsk, unsigned int val)
1901{
1902 struct pt_regs *regs = tsk->thread.regs;
1903
1904 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1905 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1906 return -EINVAL;
1907
1908 if (regs == NULL)
1909 return -EINVAL;
1910
1911 if (val == PR_ENDIAN_BIG)
1912 regs->msr &= ~MSR_LE;
1913 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1914 regs->msr |= MSR_LE;
1915 else
1916 return -EINVAL;
1917
1918 return 0;
1919}
1920
1921int get_endian(struct task_struct *tsk, unsigned long adr)
1922{
1923 struct pt_regs *regs = tsk->thread.regs;
1924 unsigned int val;
1925
1926 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1927 !cpu_has_feature(CPU_FTR_REAL_LE))
1928 return -EINVAL;
1929
1930 if (regs == NULL)
1931 return -EINVAL;
1932
1933 if (regs->msr & MSR_LE) {
1934 if (cpu_has_feature(CPU_FTR_REAL_LE))
1935 val = PR_ENDIAN_LITTLE;
1936 else
1937 val = PR_ENDIAN_PPC_LITTLE;
1938 } else
1939 val = PR_ENDIAN_BIG;
1940
1941 return put_user(val, (unsigned int __user *)adr);
1942}
1943
1944int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1945{
1946 tsk->thread.align_ctl = val;
1947 return 0;
1948}
1949
1950int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1951{
1952 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1953}
1954
1955static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1956 unsigned long nbytes)
1957{
1958 unsigned long stack_page;
1959 unsigned long cpu = task_cpu(p);
1960
1961 stack_page = (unsigned long)hardirq_ctx[cpu];
1962 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1963 return 1;
1964
1965 stack_page = (unsigned long)softirq_ctx[cpu];
1966 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1967 return 1;
1968
1969 return 0;
1970}
1971
1972int validate_sp(unsigned long sp, struct task_struct *p,
1973 unsigned long nbytes)
1974{
1975 unsigned long stack_page = (unsigned long)task_stack_page(p);
1976
1977 if (sp < THREAD_SIZE)
1978 return 0;
1979
1980 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1981 return 1;
1982
1983 return valid_irq_stack(sp, p, nbytes);
1984}
1985
1986EXPORT_SYMBOL(validate_sp);
1987
1988static unsigned long __get_wchan(struct task_struct *p)
1989{
1990 unsigned long ip, sp;
1991 int count = 0;
1992
1993 if (!p || p == current || p->state == TASK_RUNNING)
1994 return 0;
1995
1996 sp = p->thread.ksp;
1997 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
1998 return 0;
1999
2000 do {
2001 sp = *(unsigned long *)sp;
2002 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2003 p->state == TASK_RUNNING)
2004 return 0;
2005 if (count > 0) {
2006 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2007 if (!in_sched_functions(ip))
2008 return ip;
2009 }
2010 } while (count++ < 16);
2011 return 0;
2012}
2013
2014unsigned long get_wchan(struct task_struct *p)
2015{
2016 unsigned long ret;
2017
2018 if (!try_get_task_stack(p))
2019 return 0;
2020
2021 ret = __get_wchan(p);
2022
2023 put_task_stack(p);
2024
2025 return ret;
2026}
2027
2028static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2029
2030void show_stack(struct task_struct *tsk, unsigned long *stack)
2031{
2032 unsigned long sp, ip, lr, newsp;
2033 int count = 0;
2034 int firstframe = 1;
2035#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2036 struct ftrace_ret_stack *ret_stack;
2037 extern void return_to_handler(void);
2038 unsigned long rth = (unsigned long)return_to_handler;
2039 int curr_frame = 0;
2040#endif
2041
2042 if (tsk == NULL)
2043 tsk = current;
2044
2045 if (!try_get_task_stack(tsk))
2046 return;
2047
2048 sp = (unsigned long) stack;
2049 if (sp == 0) {
2050 if (tsk == current)
2051 sp = current_stack_pointer();
2052 else
2053 sp = tsk->thread.ksp;
2054 }
2055
2056 lr = 0;
2057 printk("Call Trace:\n");
2058 do {
2059 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2060 break;
2061
2062 stack = (unsigned long *) sp;
2063 newsp = stack[0];
2064 ip = stack[STACK_FRAME_LR_SAVE];
2065 if (!firstframe || ip != lr) {
2066 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2067#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2068 if ((ip == rth) && curr_frame >= 0) {
2069 ret_stack = ftrace_graph_get_ret_stack(current,
2070 curr_frame++);
2071 if (ret_stack)
2072 pr_cont(" (%pS)",
2073 (void *)ret_stack->ret);
2074 else
2075 curr_frame = -1;
2076 }
2077#endif
2078 if (firstframe)
2079 pr_cont(" (unreliable)");
2080 pr_cont("\n");
2081 }
2082 firstframe = 0;
2083
2084
2085
2086
2087
2088 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2089 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2090 struct pt_regs *regs = (struct pt_regs *)
2091 (sp + STACK_FRAME_OVERHEAD);
2092 lr = regs->link;
2093 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
2094 regs->trap, (void *)regs->nip, (void *)lr);
2095 firstframe = 1;
2096 }
2097
2098 sp = newsp;
2099 } while (count++ < kstack_depth_to_print);
2100
2101 put_task_stack(tsk);
2102}
2103
2104#ifdef CONFIG_PPC64
2105
2106void notrace __ppc64_runlatch_on(void)
2107{
2108 struct thread_info *ti = current_thread_info();
2109
2110 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2111
2112
2113
2114
2115
2116 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2117 } else {
2118 unsigned long ctrl;
2119
2120
2121
2122
2123
2124 ctrl = mfspr(SPRN_CTRLF);
2125 ctrl |= CTRL_RUNLATCH;
2126 mtspr(SPRN_CTRLT, ctrl);
2127 }
2128
2129 ti->local_flags |= _TLF_RUNLATCH;
2130}
2131
2132
2133void notrace __ppc64_runlatch_off(void)
2134{
2135 struct thread_info *ti = current_thread_info();
2136
2137 ti->local_flags &= ~_TLF_RUNLATCH;
2138
2139 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2140 mtspr(SPRN_CTRLT, 0);
2141 } else {
2142 unsigned long ctrl;
2143
2144 ctrl = mfspr(SPRN_CTRLF);
2145 ctrl &= ~CTRL_RUNLATCH;
2146 mtspr(SPRN_CTRLT, ctrl);
2147 }
2148}
2149#endif
2150
2151unsigned long arch_align_stack(unsigned long sp)
2152{
2153 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2154 sp -= get_random_int() & ~PAGE_MASK;
2155 return sp & ~0xf;
2156}
2157
2158static inline unsigned long brk_rnd(void)
2159{
2160 unsigned long rnd = 0;
2161
2162
2163 if (is_32bit_task())
2164 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2165 else
2166 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2167
2168 return rnd << PAGE_SHIFT;
2169}
2170
2171unsigned long arch_randomize_brk(struct mm_struct *mm)
2172{
2173 unsigned long base = mm->brk;
2174 unsigned long ret;
2175
2176#ifdef CONFIG_PPC_BOOK3S_64
2177
2178
2179
2180
2181
2182
2183
2184
2185 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2186 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2187#endif
2188
2189 ret = PAGE_ALIGN(base + brk_rnd());
2190
2191 if (ret < mm->brk)
2192 return mm->brk;
2193
2194 return ret;
2195}
2196
2197