/*
 * PowerPC process and thread management (arch/powerpc/kernel/process.c):
 * lazy floating-point, AltiVec, VSX and SPE register state handling,
 * hardware breakpoint setup, context switching, thread creation and
 * exec setup, and kernel stack walking.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
/* On UP, track which task last used each lazily-switched register set. */
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the thread_struct
 * is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                /*
                 * We need to disable preemption here because if we didn't,
                 * another process could get scheduled after the regs->msr
                 * test but before we have finished saving the FP registers
                 * to the thread_struct.  Once we have done the giveup_fpu,
                 * we don't care if another task uses the FPU, because the
                 * state we care about has already been saved.
                 */
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
                        /*
                         * This should only ever be called for current or
                         * for a stopped child process.  Since we save the
                         * FP register state at context switch on SMP,
                         * something is wrong if a stopped child still
                         * appears to have live FP state in the registers.
                         */
                        BUG_ON(tsk != current);
#endif
                        giveup_fpu(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

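/*
 * Allow the kernel to use the FPU: any live user FP state is first
 * flushed back to the owning thread_struct.  The caller must not be
 * preemptible.
 */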
void enable_kernel_fp(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
                giveup_fpu(current);
        else
                giveup_fpu(NULL);       /* just enables FP for kernel */
#else
        giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
                giveup_altivec(current);
        else
                giveup_altivec_notask();
#else
        giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the thread_struct
 * is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_altivec(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
                giveup_vsx(current);
        else
                giveup_vsx(NULL);
#else
        giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

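/*
 * Giving up VSX state means giving up the FP and VMX register sets
 * too, since the VSX registers overlay both of them.
 */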
void giveup_vsx(struct task_struct *tsk)
{
        giveup_fpu(tsk);
        giveup_altivec(tsk);
        __giveup_vsx(tsk);
}

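/*
 * Make sure the VSX register state in the thread_struct is up to date
 * for task tsk.
 */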
void flush_vsx_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        giveup_vsx(tsk);
                }
                preempt_enable();
        }
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
        WARN_ON(preemptible());

#ifdef CONFIG_SMP
        if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
                giveup_spe(current);
        else
                giveup_spe(NULL);
#else
        giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

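/*
 * Make sure the SPE register state (including the SPEFSCR) in the
 * thread_struct is up to date for task tsk.
 */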
void flush_spe_to_thread(struct task_struct *tsk)
{
        if (tsk->thread.regs) {
                preempt_disable();
                if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
                        BUG_ON(tsk != current);
#endif
                        tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
                        giveup_spe(tsk);
                }
                preempt_enable();
        }
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, AltiVec or SPE)
 * and the current task is the one whose state is live in the CPU,
 * forget that association.
 */
void discard_lazy_cpu_state(void)
{
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
        if (last_task_used_altivec == current)
                last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (last_task_used_vsx == current)
                last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        if (last_task_used_spe == current)
                last_task_used_spe = NULL;
#endif
        preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
                  unsigned long error_code, int signal_code, int breakpt)
{
        siginfo_t info;

        current->thread.trap_nr = signal_code;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = breakpt;        /* breakpoint or watchpoint id */
        info.si_code = signal_code;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
              unsigned long error_code)
{
        siginfo_t info;

        current->thread.trap_nr = TRAP_HWBKPT;
        if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
                        11, SIGSEGV) == NOTIFY_STOP)
                return;

        if (debugger_break_match(regs))
                return;

        /* Clear the breakpoint */
        hw_breakpoint_disable();

        /* Deliver the signal to userspace */
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_HWBKPT;
        info.si_addr = (void __user *)address;
        force_sig_info(SIGTRAP, &info, current);
}
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        thread->iac3 = thread->iac4 = 0;
#endif
        thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        thread->dvc1 = thread->dvc2 = 0;
#endif
        thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
        /*
         * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
         */
        thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
                        DBCR1_IAC3US | DBCR1_IAC4US;
        /*
         * Force the Data Address Compare User/Supervisor bits to be
         * user-only as well.
         */
        thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
        thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
        /*
         * We could have inherited MSR_DE from userspace, since
         * it doesn't get cleared on exception entry.  Make sure
         * MSR_DE is clear before we enable any debug events.
         */
        mtmsr(mfmsr() & ~MSR_DE);

        mtspr(SPRN_IAC1, thread->iac1);
        mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        mtspr(SPRN_IAC3, thread->iac3);
        mtspr(SPRN_IAC4, thread->iac4);
#endif
        mtspr(SPRN_DAC1, thread->dac1);
        mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
        mtspr(SPRN_DVC1, thread->dvc1);
        mtspr(SPRN_DVC2, thread->dvc2);
#endif
        mtspr(SPRN_DBCR0, thread->dbcr0);
        mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
        mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
        if ((current->thread.dbcr0 & DBCR0_IDM)
                || (new_thread->dbcr0 & DBCR0_IDM))
                        prime_debug_regs(new_thread);
}
#else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
        thread->hw_brk.address = 0;
        thread->hw_brk.type = 0;
        set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif  /* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
        isync();
#endif
        return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        mtspr(SPRN_DABR, dabr);
        if (cpu_has_feature(CPU_FTR_DABRX))
                mtspr(SPRN_DABRX, dabrx);
        return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
        return -EINVAL;
}
#endif

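/*
 * Build DABR/DABRX values from an arch_hw_breakpoint and install them,
 * preferring the platform's set_dabr hook when one is registered.
 */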
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
        unsigned long dabr, dabrx;

        dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
        dabrx = ((brk->type >> 3) & 0x7);

        if (ppc_md.set_dabr)
                return ppc_md.set_dabr(dabr, dabrx);

        return __set_dabr(dabr, dabrx);
}

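/*
 * Build DAWR/DAWRX values from an arch_hw_breakpoint and install them,
 * preferring the platform's set_dawr hook when one is registered.
 */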
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
        unsigned long dawr, dawrx, mrd;

        dawr = brk->address;

        dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
                           << (63 - 58);        /* read/write bits */
        dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
                           << (63 - 59);        /* translate bit */
        dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
                           >> 3;                /* privilege bits */

        /*
         * brk->len is in bytes.  The DAWRX MRD field encodes the match
         * range as a count of doublewords minus one, so 0 means 8 bytes,
         * 1 means 16 bytes, and so on.
         */
        mrd = ((brk->len + 7) >> 3) - 1;
        dawrx |= (mrd & 0x3f) << (63 - 53);

        if (ppc_md.set_dawr)
                return ppc_md.set_dawr(dawr, dawrx);
        mtspr(SPRN_DAWR, dawr);
        mtspr(SPRN_DAWRX, dawrx);
        return 0;
}

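/*
 * Install a hardware breakpoint on this CPU, using the DAWR when the
 * CPU has one and falling back to the DABR otherwise.  A copy is kept
 * in current_brk so redundant updates can be skipped at context switch.
 */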
int set_breakpoint(struct arch_hw_breakpoint *brk)
{
        __get_cpu_var(current_brk) = *brk;

        if (cpu_has_feature(CPU_FTR_DAWR))
                return set_dawr(brk);

        return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

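/* Two breakpoints match only if address, type and length all agree. */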
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
                                struct arch_hw_breakpoint *b)
{
        if (a->address != b->address)
                return false;
        if (a->type != b->type)
                return false;
        if (a->len != b->len)
                return false;
        return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
        /*
         * We have to work out if we're switching away from a task that's
         * in the middle of a transaction.
         *
         * In switching we need to maintain a second register state as
         * oldtask->thread.ckpt_regs.  tm_reclaim() saves the checkpointed
         * (tbegin) state there and saves the transactional (current)
         * FP/VEC registers into the thread's transactional save areas.
         *
         * The TM SPRs (TFHAR/TEXASR/TFIAR) are also saved here.
         */
        struct thread_struct *thr = &tsk->thread;

        if (!thr->regs)
                return;

        if (!MSR_TM_ACTIVE(thr->regs->msr))
                goto out_and_saveregs;

        /*
         * Stash the original thread MSR, as giveup_fpu et al will
         * modify it.  We hold onto it later to see whether the task
         * used FP and vector registers.
         */
        thr->tm_orig_msr = thr->regs->msr;

        TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
                 "ccr=%lx, msr=%lx, trap=%lx)\n",
                 tsk->pid, thr->regs->nip,
                 thr->regs->ccr, thr->regs->msr,
                 thr->regs->trap);

        tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

        TM_DEBUG("--- tm_reclaim on pid %d complete\n",
                 tsk->pid);

out_and_saveregs:
        /*
         * Always save the TM SPRs here, even if no transaction is active,
         * to stay consistent with the restore path (in recheckpoint),
         * which cannot happen later in _switch().
         */
        tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
        unsigned long msr;

        if (!cpu_has_feature(CPU_FTR_TM))
                return;

        /*
         * Recheckpoint the registers of the thread we're about to
         * switch to.
         *
         * If the task was using FP, we non-lazily reload both the
         * checkpointed and the transactional FP register states, because
         * the kernel doesn't see if/when a TM rollback occurs; on a later
         * FP-unavailable fault we could not tell which set of FP regs
         * needs restoring.
         */
        if (!new->thread.regs)
                return;

        /*
         * The TM SPRs are restored here, so that TEXASR.FS can be set
         * before the trecheckpoint and no explosion occurs.
         */
        tm_restore_sprs(&new->thread);

        if (!MSR_TM_ACTIVE(new->thread.regs->msr))
                return;
        msr = new->thread.tm_orig_msr;

        TM_DEBUG("*** tm_recheckpoint of pid %d "
                 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
                 new->pid, new->thread.regs->msr, msr);

        /* This loads the checkpointed register state. */
        tm_recheckpoint(&new->thread, msr);

        /* This loads the transactional FP/VEC state, if it was used. */
        if (msr & MSR_FP) {
                do_load_up_transact_fpu(&new->thread);
                new->thread.regs->msr |=
                        (MSR_FP | new->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&new->thread);
                new->thread.regs->msr |= MSR_VEC;
        }
#endif
        /* We may as well turn on VSX too since all the state is restored. */
        if (msr & MSR_VSX)
                new->thread.regs->msr |= MSR_VSX;

        TM_DEBUG("*** tm_recheckpoint of pid %d complete "
                 "(kernel msr 0x%lx)\n",
                 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
        if (cpu_has_feature(CPU_FTR_TM)) {
                tm_enable();
                tm_reclaim_task(prev);
        }
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

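/*
 * Switch the coprocessor, debug and TM state from prev to new, then do
 * the low-level register and stack switch via _switch().  Returns the
 * task we switched away from.
 */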
struct task_struct *__switch_to(struct task_struct *prev,
        struct task_struct *new)
{
        struct thread_struct *new_thread, *old_thread;
        unsigned long flags;
        struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
        struct ppc64_tlb_batch *batch;
#endif

        /*
         * Save the TAR here before we do treclaim/trecheckpoint below,
         * as both of those will change the TAR.
         */
        save_tar(&prev->thread);

        __switch_to_tm(prev);

#ifdef CONFIG_SMP
        /*
         * Avoid the complexity of lazy FP save/restore on SMP by just
         * saving the FP state every time we switch out, if this task
         * used the FPU during the last quantum.  If it uses the FPU
         * again it will trap and reload its FP regs, so we only ever
         * need a save here, never a restore.
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
                giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
        /*
         * Likewise, if the previous thread used AltiVec in the last
         * quantum (and thus may have changed the vector registers),
         * save them.  On SMP we always save/restore AltiVec state to
         * avoid the complexity of migrating between processors.
         */
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
                giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
                /* The VMX and FPU registers have already been saved above */
                __giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * Same for SPE: if the previous thread used SPE in the last
         * quantum, save its state now.
         */
        if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
                giveup_spe(prev);
#endif /* CONFIG_SPE */

#else   /* !CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
        /*
         * Avoid the vector-unavailable trap: on UP, if the new task was
         * the last user of AltiVec, its state is still live in the
         * registers, so just turn MSR_VEC back on.
         */
        if (new->thread.regs && last_task_used_altivec == new)
                new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
        if (new->thread.regs && last_task_used_vsx == new)
                new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /*
         * Same trick for SPE: avoid the trap if the new task still owns
         * the live SPE state.
         */
        if (new->thread.regs && last_task_used_spe == new)
                new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR(s) on the next context switch.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
        if (unlikely(hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
                set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

        new_thread = &new->thread;
        old_thread = &current->thread;

#ifdef CONFIG_PPC64
        /*
         * Collect processor utilization data per process
         */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                long unsigned start_tb, current_tb;
                start_tb = old_thread->start_tb;
                cu->current_tb = current_tb = mfspr(SPRN_PURR);
                old_thread->accum_tb += (current_tb - start_tb);
                new_thread->start_tb = current_tb;
        }
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
        batch = &__get_cpu_var(ppc64_tlb_batch);
        if (batch->active) {
                current_thread_info()->local_flags |= _TLF_LAZY_MMU;
                if (batch->index)
                        __flush_tlb_pending(batch);
                batch->active = 0;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_save(flags);

        /*
         * We can't take a PMU exception inside _switch() since there
         * is a window where the kernel stack SLB and the kernel stack
         * are out of sync.  Hard disable here.
         */
        hard_irq_disable();

        tm_recheckpoint_new_task(new);

        last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
        if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                batch = &__get_cpu_var(ppc64_tlb_batch);
                batch->active = 1;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

        local_irq_restore(flags);

        return last;
}

static int instructions_to_print = 16;

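/*
 * Dump the instructions around regs->nip, marking the faulting
 * instruction with angle brackets.
 */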
static void show_instructions(struct pt_regs *regs)
{
        int i;
        unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
                        sizeof(int));

        printk("Instruction dump:");

        for (i = 0; i < instructions_to_print; i++) {
                int instr;

                if (!(i % 8))
                        printk("\n");

#if !defined(CONFIG_BOOKE)
                /*
                 * If executing with the IMMU off, adjust pc rather
                 * than print XXXXXXXX.
                 */
                if (!(regs->msr & MSR_IR))
                        pc = (unsigned long)phys_to_virt(pc);
#endif

                /*
                 * We use __get_user here *only* to avoid an OOPS on a
                 * bad address because the pc *should* only be a
                 * kernel address.
                 */
                if (!__kernel_text_address(pc) ||
                    __get_user(instr, (unsigned int __user *)pc)) {
                        printk(KERN_CONT "XXXXXXXX ");
                } else {
                        if (regs->nip == pc)
                                printk(KERN_CONT "<%08x> ", instr);
                        else
                                printk(KERN_CONT "%08x ", instr);
                }

                pc += sizeof(int);
        }

        printk("\n");
}

static struct regbit {
        unsigned long bit;
        const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
        {MSR_SF,        "SF"},
        {MSR_HV,        "HV"},
#endif
        {MSR_VEC,       "VEC"},
        {MSR_VSX,       "VSX"},
#ifdef CONFIG_BOOKE
        {MSR_CE,        "CE"},
#endif
        {MSR_EE,        "EE"},
        {MSR_PR,        "PR"},
        {MSR_FP,        "FP"},
        {MSR_ME,        "ME"},
#ifdef CONFIG_BOOKE
        {MSR_DE,        "DE"},
#else
        {MSR_SE,        "SE"},
        {MSR_BE,        "BE"},
#endif
        {MSR_IR,        "IR"},
        {MSR_DR,        "DR"},
        {MSR_PMM,       "PMM"},
#ifndef CONFIG_BOOKE
        {MSR_RI,        "RI"},
        {MSR_LE,        "LE"},
#endif
        {0,             NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
        const char *sep = "";

        printk("<");
        for (; bits->bit; ++bits)
                if (val & bits->bit) {
                        printk("%s%s", sep, bits->name);
                        sep = ",";
                }
        printk(">");
}

#ifdef CONFIG_PPC64
#define REG             "%016lx"
#define REGS_PER_LINE   4
#define LAST_VOLATILE   13
#else
#define REG             "%08lx"
#define REGS_PER_LINE   8
#define LAST_VOLATILE   12
#endif

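/* Print a full register dump, followed by a stack trace and, for
 * kernel-mode faults, an instruction dump. */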
void show_regs(struct pt_regs *regs)
{
        int i, trap;

        show_regs_print_info(KERN_DEFAULT);

        printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
               regs->nip, regs->link, regs->ctr);
        printk("REGS: %p TRAP: %04lx %s (%s)\n",
               regs, regs->trap, print_tainted(), init_utsname()->release);
        printk("MSR: "REG" ", regs->msr);
        printbits(regs->msr, msr_bits);
        printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
        printk("SOFTE: %ld\n", regs->softe);
#endif
        trap = TRAP(regs);
        /* For non-syscall exceptions the CFAR is saved in the orig_gpr3 slot */
        if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
                printk("CFAR: "REG"\n", regs->orig_gpr3);
        if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
                printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
                printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif

        for (i = 0; i < 32; i++) {
                if ((i % REGS_PER_LINE) == 0)
                        printk("\nGPR%02d: ", i);
                printk(REG " ", regs->gpr[i]);
                if (i == LAST_VOLATILE && !FULL_REGS(regs))
                        break;
        }
        printk("\n");
#ifdef CONFIG_KALLSYMS
        /*
         * Look up the NIP and LR values and print their symbolic names.
         */
        printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
        printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
        show_stack(current, (unsigned long *) regs->gpr[1]);
        if (!user_mode(regs))
                show_instructions(regs);
}

void exit_thread(void)
{
        discard_lazy_cpu_state();
}

void flush_thread(void)
{
        discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
        set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy the
 * current task into it: flush all coprocessor state back into the
 * thread_struct so the copy picks it up.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        flush_fp_to_thread(src);
        flush_altivec_to_thread(src);
        flush_vsx_to_thread(src);
        flush_spe_to_thread(src);

        *dst = *src;

        clear_task_ebb(dst);

        return 0;
}

extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

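/*
 * Copy a thread: set up the child's kernel stack and register state.
 * Kernel threads get a clean frame that starts in ret_from_kernel_thread;
 * user threads get a copy of the parent's registers and return through
 * ret_from_fork.
 */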
int copy_thread(unsigned long clone_flags, unsigned long usp,
                unsigned long arg, struct task_struct *p)
{
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
        extern void ret_from_kernel_thread(void);
        void (*f)(void);
        unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        /* Copy registers */
        sp -= sizeof(struct pt_regs);
        childregs = (struct pt_regs *) sp;
        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                struct thread_info *ti = (void *)task_stack_page(p);
                memset(childregs, 0, sizeof(struct pt_regs));
                childregs->gpr[1] = sp + sizeof(struct pt_regs);
                childregs->gpr[14] = usp;       /* function to call */
#ifdef CONFIG_PPC64
                clear_tsk_thread_flag(p, TIF_32BIT);
                childregs->softe = 1;
#endif
                childregs->gpr[15] = arg;
                p->thread.regs = NULL;  /* no user register state */
                ti->flags |= _TIF_RESTOREALL;
                f = ret_from_kernel_thread;
        } else {
                /* user thread */
                struct pt_regs *regs = current_pt_regs();
                CHECK_FULL_REGS(regs);
                *childregs = *regs;
                if (usp)
                        childregs->gpr[1] = usp;
                p->thread.regs = childregs;
                childregs->gpr[3] = 0;  /* result of fork() in the child */
                if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
                        if (!is_32bit_task())
                                childregs->gpr[13] = childregs->gpr[6];
                        else
#endif
                                childregs->gpr[2] = childregs->gpr[6];
                }

                f = ret_from_fork;
        }
        sp -= STACK_FRAME_OVERHEAD;

        /*
         * The way this works is that at some point in the future
         * some task will call _switch to switch to the new task.
         * That will pop off the stack frame created below and start
         * the new task running at f (ret_from_fork or
         * ret_from_kernel_thread), which does some housekeeping and
         * then returns from the syscall or starts the thread function.
         */
        ((unsigned long *)sp)[0] = 0;   /* terminate the stack back chain */
        sp -= sizeof(struct pt_regs);
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
        p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
                                _ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
        p->thread.ptrace_bps[0] = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
        if (mmu_has_feature(MMU_FTR_SLB)) {
                unsigned long sp_vsid;
                unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

                if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
                                << SLB_VSID_SHIFT_1T;
                else
                        sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
                                << SLB_VSID_SHIFT;
                sp_vsid |= SLB_VSID_KERNEL | llp;
                p->thread.ksp_vsid = sp_vsid;
        }
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_DSCR)) {
                p->thread.dscr_inherit = current->thread.dscr_inherit;
                p->thread.dscr = current->thread.dscr;
        }
        if (cpu_has_feature(CPU_FTR_HAS_PPR))
                p->thread.ppr = INIT_PPR;
#endif
        /*
         * The PPC64 ABI uses function descriptors, so f is actually a
         * pointer to the descriptor's TOC entry, whose first word is
         * the real entry address.
         */
#ifdef CONFIG_PPC64
        kregs->nip = *((unsigned long *)f);
#else
        kregs->nip = (unsigned long)f;
#endif
        return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
        unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
#endif

        /*
         * If we exec out of a kernel thread then thread.regs will not
         * be set.  Do it now.
         */
        if (!current->thread.regs) {
                struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
                current->thread.regs = regs - 1;
        }

        memset(regs->gpr, 0, sizeof(regs->gpr));
        regs->ctr = 0;
        regs->link = 0;
        regs->xer = 0;
        regs->ccr = 0;
        regs->gpr[1] = sp;

        /*
         * We have just cleared all the nonvolatile GPRs, so make
         * FULL_REGS(regs) return true.  This is necessary to allow
         * ptrace to examine the thread immediately after exec.
         */
        regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
        regs->mq = 0;
        regs->nip = start;
        regs->msr = MSR_USER;
#else
        if (!is_32bit_task()) {
                unsigned long entry, toc;

                /*
                 * start is a relocated pointer to the function
                 * descriptor for the ELF _start routine.  The first
                 * entry in the descriptor is the entry address of
                 * _start and the second entry is the TOC value we
                 * need to use.
                 */
                __get_user(entry, (unsigned long __user *)start);
                __get_user(toc, (unsigned long __user *)start+1);

                /*
                 * Check whether the e_entry function descriptor entries
                 * need to be relocated before we can use them.
                 */
                if (load_addr != 0) {
                        entry += load_addr;
                        toc += load_addr;
                }
                regs->nip = entry;
                regs->gpr[2] = toc;
                regs->msr = MSR_USER64;
        } else {
                regs->nip = start;
                regs->gpr[2] = 0;
                regs->msr = MSR_USER32;
        }
#endif
        discard_lazy_cpu_state();
#ifdef CONFIG_VSX
        current->thread.used_vsr = 0;
#endif
        memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
        current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
        memset(current->thread.vr, 0, sizeof(current->thread.vr));
        memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
        current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
        current->thread.vrsave = 0;
        current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
        memset(current->thread.evr, 0, sizeof(current->thread.evr));
        current->thread.acc = 0;
        current->thread.spefscr = 0;
        current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (cpu_has_feature(CPU_FTR_TM))
                regs->msr |= MSR_TM;
        current->thread.tm_tfhar = 0;
        current->thread.tm_texasr = 0;
        current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
                | PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        /*
         * This is a bit hairy.  On an SPE-enabled processor (with
         * embedded FP) we store the IEEE exception enable flags in
         * fpexc_mode.  fpexc_mode is also used for setting the FP
         * exception mode (async, precise, disabled) on "classic" FP.
         */
        if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE)) {
                        tsk->thread.fpexc_mode = val &
                                (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
                        return 0;
                } else {
                        return -EINVAL;
                }
#else
                return -EINVAL;
#endif
        }

        /*
         * On CONFIG_SPE this does not hurt us: the bits that
         * __pack_fe01 uses do not overlap with the bits used for
         * PR_FP_EXC_SW_ENABLE, and the MSR[FE0,FE1] bits on SPE
         * implementations are reserved, so writing to them does not
         * change anything.
         */
        if (val > PR_FP_EXC_PRECISE)
                return -EINVAL;
        tsk->thread.fpexc_mode = __pack_fe01(val);
        if (regs != NULL && (regs->msr & MSR_FP) != 0)
                regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
                        | tsk->thread.fpexc_mode;
        return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
        unsigned int val;

        if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
                if (cpu_has_feature(CPU_FTR_SPE))
                        val = tsk->thread.fpexc_mode;
                else
                        return -EINVAL;
#else
                return -EINVAL;
#endif
        else
                val = __unpack_fe01(tsk->thread.fpexc_mode);
        return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
        struct pt_regs *regs = tsk->thread.regs;

        if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
            (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (val == PR_ENDIAN_BIG)
                regs->msr &= ~MSR_LE;
        else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
                regs->msr |= MSR_LE;
        else
                return -EINVAL;

        return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
        struct pt_regs *regs = tsk->thread.regs;
        unsigned int val;

        if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
            !cpu_has_feature(CPU_FTR_REAL_LE))
                return -EINVAL;

        if (regs == NULL)
                return -EINVAL;

        if (regs->msr & MSR_LE) {
                if (cpu_has_feature(CPU_FTR_REAL_LE))
                        val = PR_ENDIAN_LITTLE;
                else
                        val = PR_ENDIAN_PPC_LITTLE;
        } else
                val = PR_ENDIAN_BIG;

        return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
        tsk->thread.align_ctl = val;
        return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
        return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

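/*
 * Return non-zero if sp points within one of this task's CPU's IRQ
 * stacks and leaves at least nbytes of room on that stack.
 */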
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
                                  unsigned long nbytes)
{
        unsigned long stack_page;
        unsigned long cpu = task_cpu(p);

        /*
         * Avoid crashing if the stack has overflowed and corrupted
         * task_cpu(p), which lives in the thread_info struct.
         */
        if (cpu < NR_CPUS && cpu_possible(cpu)) {
                stack_page = (unsigned long) hardirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;

                stack_page = (unsigned long) softirq_ctx[cpu];
                if (sp >= stack_page + sizeof(struct thread_struct)
                    && sp <= stack_page + THREAD_SIZE - nbytes)
                        return 1;
        }
        return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
                unsigned long nbytes)
{
        unsigned long stack_page = (unsigned long)task_stack_page(p);

        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
                return 1;

        return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

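/*
 * Walk up the sleeping task's kernel stack and return the first saved
 * return address that is outside the scheduler, i.e. where the task
 * is actually waiting.
 */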
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long ip, sp;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.ksp;
        if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                return 0;

        do {
                sp = *(unsigned long *)sp;
                if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
                        return 0;
                if (count > 0) {
                        ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
                        if (!in_sched_functions(ip))
                                return ip;
                }
        } while (count++ < 16);
        return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

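/*
 * Print a kernel-stack backtrace for tsk, flagging exception frames
 * and, when the function graph tracer is active, resolving patched
 * return addresses back to their real targets.
 */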
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
        unsigned long sp, ip, lr, newsp;
        int count = 0;
        int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        int curr_frame = current->curr_ret_stack;
        extern void return_to_handler(void);
        unsigned long rth = (unsigned long)return_to_handler;
        unsigned long mrth = -1;
#ifdef CONFIG_PPC64
        extern void mod_return_to_handler(void);
        rth = *(unsigned long *)rth;
        mrth = (unsigned long)mod_return_to_handler;
        mrth = *(unsigned long *)mrth;
#endif
#endif

        sp = (unsigned long) stack;
        if (tsk == NULL)
                tsk = current;
        if (sp == 0) {
                if (tsk == current)
                        asm("mr %0,1" : "=r" (sp));
                else
                        sp = tsk->thread.ksp;
        }

        lr = 0;
        printk("Call Trace:\n");
        do {
                if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
                        return;

                stack = (unsigned long *) sp;
                newsp = stack[0];
                ip = stack[STACK_FRAME_LR_SAVE];
                if (!firstframe || ip != lr) {
                        printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
                        if ((ip == rth || ip == mrth) && curr_frame >= 0) {
                                printk(" (%pS)",
                                       (void *)current->ret_stack[curr_frame].ret);
                                curr_frame--;
                        }
#endif
                        if (firstframe)
                                printk(" (unreliable)");
                        printk("\n");
                }
                firstframe = 0;

                /*
                 * See if this is an exception frame.
                 * We look for the "regshere" marker in the current frame.
                 */
                if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
                    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
                        struct pt_regs *regs = (struct pt_regs *)
                                (sp + STACK_FRAME_OVERHEAD);
                        lr = regs->link;
                        printk("--- Exception: %lx at %pS\n    LR = %pS\n",
                               regs->trap, (void *)regs->nip, (void *)lr);
                        firstframe = 1;
                }

                sp = newsp;
        } while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl |= CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);

        ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
        struct thread_info *ti = current_thread_info();
        unsigned long ctrl;

        ti->local_flags &= ~_TLF_RUNLATCH;

        ctrl = mfspr(SPRN_CTRLF);
        ctrl &= ~CTRL_RUNLATCH;
        mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

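/*
 * Randomize the low bits of the stack pointer at exec time, keeping
 * 16-byte alignment.
 */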
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() & ~PAGE_MASK;
        return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
        unsigned long rnd = 0;

        /* 8MB for 32bit, 1GB for 64bit */
        if (is_32bit_task())
                rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
        else
                rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

        return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
        unsigned long base = mm->brk;
        unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * If we are using 1TB segments and we are allowed to randomise
         * the heap, we can put it above 1TB so it is backed by a 1TB
         * segment.  Otherwise the heap will be in the bottom 1TB, which
         * always uses 256MB segments, and this may result in a
         * performance penalty.
         */
        if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
                base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

        ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < mm->brk)
                return mm->brk;

        return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
        unsigned long ret = PAGE_ALIGN(base + brk_rnd());

        if (ret < base)
                return base;

        return ret;
}