#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>
#include <linux/elf-randomize.h>
#include <linux/pkeys.h>
#include <linux/seq_buf.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/hw_irq.h>
#endif
#include <asm/code-patching.h>
#include <asm/exec.h>
#include <asm/livepatch.h>
#include <asm/cpu_has_feature.h>
#include <asm/asm-prototypes.h>
#include <asm/stacktrace.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Are we running in "TM suspend disabled" mode?  Set once during boot
 * and never changed afterwards (__ro_after_init); if true, tasks must
 * not be left in TM suspended state.
 */
bool tm_suspend_disabled __ro_after_init = false;

static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}

static bool tm_active_with_fp(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_FP);
}

static bool tm_active_with_altivec(struct task_struct *tsk)
{
	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
		(tsk->thread.ckpt_regs.msr & MSR_VEC);
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

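/*
 * Set the given bits in the kernel's MSR, also forcing MSR_VSX on
 * whenever MSR_FP is requested on a VSX-capable CPU.  Returns the
 * resulting MSR value.
 */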
unsigned long msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);

	return newmsr;
}
EXPORT_SYMBOL_GPL(msr_check_and_set);

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
static void __giveup_fpu(struct task_struct *tsk)
{
	unsigned long msr;

	save_fpu(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_FP;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  Once we have done the saving, we
		 * can enable preemption so the test in preempt_enable()
		 * will work.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * we only ever get here for the current task,
			 * so tsk must be current.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);

static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	unsigned long msr;

	save_altivec(tsk);
	msr = tsk->thread.regs->msr;
	msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr &= ~MSR_VSX;
#endif
	tsk->thread.regs->msr = msr;
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	unsigned long msr = tsk->thread.regs->msr;

	/*
	 * We should never be setting MSR_VSX without also setting
	 * MSR_FP and MSR_VEC.
	 */
	WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));

	/* __giveup_fpu will clear MSR_VSX */
	if (msr & MSR_FP)
		__giveup_fpu(tsk);
	if (msr & MSR_VEC)
		__giveup_altivec(tsk);
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

void enable_kernel_vsx(void)
{
	unsigned long cpumsr;

	WARN_ON(preemptible());

	cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs &&
	    (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
		check_if_tm_restore_required(current);
		/*
		 * If a thread has already been reclaimed then the
		 * checkpointed registers are on the CPU but have definitely
		 * been saved by the reclaim code. Don't need to and *cannot*
		 * giveup as this would save to the 'live' structure not the
		 * checkpointed structure.
		 */
		if (!MSR_TM_ACTIVE(cpumsr) &&
		    MSR_TM_ACTIVE(current->thread.regs->msr))
			return;
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

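/*
 * MSR bits for all the math facilities available on this system,
 * computed once at boot by init_msr_all_available() below.
 */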
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);

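/*
 * Save all math facility state (FP, VMX and SPE) that @tsk has live
 * in registers back into its thread_struct, and make the task give
 * the facilities up by clearing the corresponding bits in its user
 * MSR.
 */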
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);
	check_if_tm_restore_required(tsk);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

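/*
 * Reload any FP/VMX/VSX state that userspace will need on return from
 * an exception: state is reloaded if the thread has used the facility
 * recently (load_fp/load_vec) or is in an active transaction, and
 * regs->msr is updated to reflect what was loaded.
 */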
void restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!MSR_TM_ACTIVE(regs->msr) &&
	    !current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR; the bit
	 * being set indicates that the registers are already hot.
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
	    restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}

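/*
 * Like giveup_all(), but only save the state to the thread_struct:
 * FP and VMX state is copied out while staying live in the registers,
 * so the task does not lose the facilities.  SPE has no save-only
 * helper, so it is given up instead.
 */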
static void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));

	if (usermsr & MSR_FP)
		save_fpu(tsk);

	if (usermsr & MSR_VEC)
		save_altivec(tsk);

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
	thread_pkey_regs_save(&tsk->thread);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif
		save_all(tsk);

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int breakpt)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	force_sig_ptrace_errno_trap(breakpt,
				    (void __user *)address);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be
	 * User-only (0b11 MSR[PR]=1) and set all other bits in DBCR2
	 * register to be clear.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}

/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM) ||
	    (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	if (ppc_breakpoint_available())
		set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#elif defined(CONFIG_PPC_8xx)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
	unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
	unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */

	if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
		lctrl1 |= 0xa0000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
		lctrl1 |= 0xf0000;
	else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
		lctrl2 = 0;

	mtspr(SPRN_LCTRL2, 0);
	mtspr(SPRN_CMPE, addr);
	mtspr(SPRN_CMPF, addr + 4);
	mtspr(SPRN_LCTRL1, lctrl1);
	mtspr(SPRN_LCTRL2, lctrl2);

	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

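/*
 * Program a breakpoint into the DAWR/DAWRX pair (POWER8 and later),
 * encoding the read/write type, translation mode, privilege mask and
 * length from the generic arch_hw_breakpoint description.
 */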
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
		 << (63 - 58);
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
		 << (63 - 59);
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) >> 3;

	/*
	 * DAWR length is stored in field MDR bits 48:53.  Matches range
	 * in doublewords (64 bits) biased by -1, e.g. 0b000000 = 1DW and
	 * 0b111111 = 64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		// Power8 or later
		set_dawr(brk);
	else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		// Power7 or earlier
		set_dabr(brk);
	else
		// Shouldn't happen due to higher level checks
		WARN_ON_ONCE(1);
}

/* Check if we have DAWR or DABR hardware */
bool ppc_breakpoint_available(void)
{
	if (cpu_has_feature(CPU_FTR_DAWR))
		return true; /* POWER8 DAWR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		return false; /* POWER9 with DAWR disabled */
	/* DABR: everything but POWER8 and POWER9 */
	return true;
}
EXPORT_SYMBOL_GPL(ppc_breakpoint_available);

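/* Do two hardware-breakpoint descriptions match in address, type and length? */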
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

static inline bool tm_enabled(struct task_struct *tsk)
{
	return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
}

static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
{
	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	giveup_all(container_of(thr, struct task_struct, thread));

	tm_reclaim(thr, cause);

	/*
	 * If we are in a transaction and FP is off then we can't have
	 * used FP inside that transaction. Hence the checkpointed
	 * state is the same as the live state. We need to copy the
	 * live state to the checkpointed state so that when the
	 * transaction is restored, the checkpointed state is correct
	 * and the aborted transaction sees the correct state. We use
	 * ckpt_regs.msr here as that's what tm_reclaim will use to
	 * determine if it will write the checkpointed state or not.
	 * So either this will write the checkpointed registers, or
	 * reclaim will. Similarly for VMX.
	 */
	if ((thr->ckpt_regs.msr & MSR_FP) == 0)
		memcpy(&thr->ckfp_state, &thr->fp_state,
		       sizeof(struct thread_fp_state));
	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
		memcpy(&thr->ckvr_state, &thr->vr_state,
		       sizeof(struct thread_vr_state));
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
	 * ckvr_state.
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	WARN_ON(tm_suspend_disabled);

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread);

void tm_recheckpoint(struct thread_struct *thread)
{
	unsigned long flags;

	if (!(thread->regs->msr & MSR_TM))
		return;

	/* We really can't be interrupted here because the TEXASR registers
	 * can't change and later in the trecheckpoint code, we have a
	 * userspace R1.  So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!tm_enabled(new))
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}

	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
		 new->pid, new->thread.regs->msr);

	tm_recheckpoint(&new->thread);

	/*
	 * The checkpointed state has been restored but the live state has
	 * not, so ensure all the math functionality is turned off to
	 * trigger restore_math() to reload.
	 */
	new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev,
		struct task_struct *new)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		if (tm_enabled(prev) || tm_enabled(new))
			tm_enable();

		if (tm_enabled(prev)) {
			prev->thread.load_tm++;
			tm_reclaim_task(prev);
			if (!MSR_TM_ACTIVE(prev->thread.regs->msr) &&
			    prev->thread.load_tm == 0)
				prev->thread.regs->msr &= ~MSR_TM;
		}

		tm_recheckpoint_new_task(new);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	/*
	 * This is the only moment we should clear TIF_RESTORE_TM as
	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
	 * again, anything else could lead to an incorrect ckpt_msr being
	 * saved and therefore incorrect signal contexts.
	 */
	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	/* Ensure that restore_math() will restore */
	if (msr_diff & MSR_FP)
		current->thread.load_fp = 1;
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
		current->thread.load_vec = 1;
#endif
	restore_math(regs);

	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev, new)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

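/*
 * Save the SPRs that are switched by C code rather than by _switch():
 * VRSAVE, and on Book3S-64 the DSCR, the EBB registers, FSCR and TAR.
 */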
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif

	thread_pkey_regs_save(t);
}

static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		if (new_thread->dscr_inherit)
			dscr = new_thread->dscr;

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->fscr != new_thread->fscr)
			mtspr(SPRN_FSCR, new_thread->fscr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    old_thread->tidr != new_thread->tidr)
		mtspr(SPRN_TIDR, new_thread->tidr);
#endif

	thread_pkey_regs_restore(new_thread, old_thread);
}

#ifdef CONFIG_PPC_BOOK3S_64
#define CP_SIZE 128
static const u8 dummy_copy_buffer[CP_SIZE] __attribute__((aligned(CP_SIZE)));
#endif

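/*
 * The bulk of the context switch: flush pending TLB batches, switch
 * breakpoint and debug state, save the outgoing task's SPRs and math
 * state, reclaim/recheckpoint any transactions, and finally hand over
 * to the assembly _switch().
 */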
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR/DAWR set/reset during context switch.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	__switch_to_tm(prev, new);

	if (!radix_enabled()) {
		/*
		 * We can't take a PMU exception inside _switch() since there
		 * is a window where the kernel stack SLB and the kernel stack
		 * are out of sync. Hard disable here.
		 */
		hard_irq_disable();
	}

	/*
	 * Call restore_sprs() before calling _switch(). If we move it after
	 * _switch() then we miss out on calling it for new tasks. The reason
	 * for this is we manually create a stack frame for new tasks that
	 * directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}

	if (current_thread_info()->task->thread.regs) {
		restore_math(current_thread_info()->task->thread.regs);

		/*
		 * The copy-paste buffer can only store into foreign real
		 * addresses, so unprivileged processes can not see the
		 * data or use it in any way unless they have foreign real
		 * mappings. If the new process has the foreign real address
		 * mappings, we must issue a cp_abort to clear any state and
		 * prevent snooping, corruption or a covert channel.
		 */
		if (current_thread_info()->task->thread.used_vas)
			asm volatile(PPC_CP_ABORT);
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

#define NR_INSN_TO_PRINT	16

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < NR_INSN_TO_PRINT; i++) {
		int instr;

		if (!(i % 8))
			pr_cont("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		    probe_kernel_address((const void *)pc, instr)) {
			pr_cont("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				pr_cont("<%08x> ", instr);
			else
				pr_cont("%08x ", instr);
		}

		pc += sizeof(int);
	}

	pr_cont("\n");
}

void show_user_instructions(struct pt_regs *regs)
{
	unsigned long pc;
	int n = NR_INSN_TO_PRINT;
	struct seq_buf s;
	char buf[96];

	pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));

	/*
	 * Make sure the NIP points at userspace, not kernel text/data or
	 * elsewhere.
	 */
	if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
			current->comm, current->pid);
		return;
	}

	seq_buf_init(&s, buf, sizeof(buf));

	while (n) {
		int i;

		seq_buf_clear(&s);

		for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
			int instr;

			if (probe_kernel_address((const void *)pc, instr)) {
				seq_buf_printf(&s, "XXXXXXXX ");
				continue;
			}
			seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
		}

		if (!seq_buf_has_overflowed(&s))
			pr_info("%s[%d]: code: %s\n", current->comm,
				current->pid, s.buffer);
	}
}

struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			pr_cont("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		pr_cont(",TM[");
		print_bits(val, msr_tm_bits, "");
		pr_cont("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	pr_cont("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	pr_cont(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs *regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %px TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		pr_cont("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	pr_cont("IRQMASK: %lx ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			pr_cont("\nGPR%02d: ", i);
		pr_cont(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	pr_cont("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up the NIP late so we have the best chance of getting
	 * the above info out without failing.
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

#ifdef CONFIG_PPC_BOOK3S_64
void arch_setup_new_exec(void)
{
	if (radix_enabled())
		return;
	hash__setup_new_exec();
}
#endif

int set_thread_uses_vas(void)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -EINVAL;

	current->thread.used_vas = 1;

	/*
	 * Even a process that has no foreign real address mapping can
	 * use an unpaired COPY instruction (to no real effect). Issue
	 * CP_ABORT to clear any pre-existing copy-paste buffer state,
	 * for the same reasons as in __switch_to().
	 */
	asm volatile(PPC_CP_ABORT);

#endif /* CONFIG_PPC_BOOK3S_64 */
	return 0;
}

#ifdef CONFIG_PPC64
/*
 * Assign a TIDR (thread ID register value) for task @t and write it
 * to the TIDR SPR.  For now, we only support setting the TIDR for the
 * 'current' task.  The value used is the task's PID truncated to 16
 * bits; thread.tidr == 0 means no TIDR has been assigned yet.
 */
int set_thread_tidr(struct task_struct *t)
{
	if (!cpu_has_feature(CPU_FTR_P9_TIDR))
		return -EINVAL;

	if (t != current)
		return -EINVAL;

	if (t->thread.tidr)
		return 0;

	t->thread.tidr = (u16)task_pid_nr(t);
	mtspr(SPRN_TIDR, t->thread.tidr);

	return 0;
}
EXPORT_SYMBOL_GPL(set_thread_tidr);

#endif /* CONFIG_PPC64 */

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory
 * and copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 *
	 * Can't pass dst because it isn't ready. Doesn't matter, passing
	 * dst is only important for __switch_to().
	 */
	__switch_to_tm(src, src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

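/*
 * Precompute the kernel-stack VSID of a new task (hash MMU only) so
 * that the SLB entry covering its stack can be installed cheaply when
 * we switch to it.
 */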
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_BOOK3S_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (radix_enabled())
		return;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);

	klp_init_thread_info(ti);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = IRQS_ENABLED;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		childregs->ppr = DEFAULT_PPR;

	p->thread.tidr = 0;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}

void preload_new_slb_context(unsigned long start, unsigned long sp);

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */

#ifdef CONFIG_PPC_BOOK3S_64
	preload_new_slb_context(start, sp);
#endif
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Clear any transactional state, we're exec()ing. The cause is
	 * not important as there will never be a recheckpoint so it's not
	 * user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);
#endif

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the program.  The first entry in
			 * the function descriptor is the entry address of
			 * the function and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	current->thread.load_slb = 0;
	current->thread.load_fp = 0;
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
	current->thread.load_vec = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
	current->thread.load_tm = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

	thread_pkey_regs_init(&current->thread);
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (asyn, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/* Snapshot the current SPEFSCR before changing
			 * the software exception-enable mode.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/* Snapshot the current SPEFSCR before reporting
			 * the software exception-enable mode.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

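/*
 * Check whether @sp points into one of the IRQ stacks of the CPU that
 * task @p last ran on, with at least @nbytes of room above it.
 */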
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	struct ftrace_ret_stack *ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	int curr_frame = 0;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				ret_stack = ftrace_graph_get_ret_stack(current,
								  curr_frame++);
				if (ret_stack)
					pr_cont(" (%pS)",
						(void *)ret_stack->ret);
				else
					curr_frame = -1;
			}
#endif
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty. We don't need to worry about radix. For
	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}