/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>
59
extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
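
/*
 * Illustrative sketch only (not part of the original file): a typical
 * caller is the ptrace path, which flushes a stopped child's live FP
 * state out of the CPU before reading tsk->thread.fpr, roughly:
 *
 *	flush_fp_to_thread(child);
 *	memcpy(data, &child->thread.fpr, sizeof(child->thread.fpr));
 *
 * The memcpy() here is a hypothetical consumer shown only to make the
 * contract concrete: after the call, thread_struct holds current state.
 */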

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
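
/*
 * For illustration: kernel code that wants to use the FPU temporarily is
 * expected to bracket the use with preemption disabled, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP registers ...
 *	preempt_enable();
 *
 * The WARN_ON(preemptible()) above enforces that convention; this sketch
 * is an assumed caller, not code from this file.
 */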

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable VSX for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DABR */
	set_dabr(0, 0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(unsigned long, current_dabr);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}

/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	if (thread->dabr) {
		thread->dabr = 0;
		thread->dabrx = 0;
		set_dabr(0, 0);
	}
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

int set_dabr(unsigned long dabr, unsigned long dabrx)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	/* XXX should we have a CPU_FTR for this? */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
#elif defined(CONFIG_PPC_BOOK3S)
	mtspr(SPRN_DABR, dabr);
	mtspr(SPRN_DABRX, dabrx);
#endif
	return 0;
}
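
/*
 * Illustrative sketch only: a DABR value combines a doubleword-aligned
 * data address with control bits.  A hypothetical caller arming a write
 * watchpoint on a translated user address might do something like
 *
 *	set_dabr(addr | DABR_TRANSLATION | DABR_DATA_WRITE,
 *		 DABRX_USER | DABRX_KERNEL);
 *
 * The constant names follow asm/reg.h conventions but are stated here as
 * an assumption, not taken from this file.
 */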

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR out if it is in use.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr, new->thread.dabrx);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
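
/*
 * For illustration: a call such as printbits(regs->msr, msr_bits) on a
 * 64-bit server CPU running user code might print
 * "<SF,EE,PR,FP,ME,IR,DR,RI>" -- one name per MSR bit that is set.  The
 * exact output naturally depends on the MSR value at hand.
 */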

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm, task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so we can flush all the coprocessor state (FP,
 * Altivec, VSX, SPE) into the thread_struct before it is copied
 * into the new task.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(src);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */

	*dst = *src;
	return 0;
}

/*
 * Copy a thread..
 */
extern unsigned long dscr_default;

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (!regs) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC64
		childregs->gpr[14] = *(unsigned long *)usp;
		childregs->gpr[2] = ((unsigned long *)usp)[1];
		clear_tsk_thread_flag(p, TIF_32BIT);
#else
		childregs->gpr[14] = usp;	/* function */
		childregs->gpr[2] = (unsigned long) p;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		f = ret_from_kernel_thread;
	} else {
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
#endif

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (ret_from_except) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
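
/*
 * For illustration: set_fpexc_mode()/get_fpexc_mode() back the generic
 * prctl() interface, so a user program could request precise FP exception
 * mode with something like
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * This is an assumed userspace caller shown as a sketch, not code from
 * this file.
 */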

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
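
/*
 * For illustration: set_endian()/get_endian() implement the
 * PR_SET_ENDIAN/PR_GET_ENDIAN prctl() pair, e.g. from userspace:
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_BIG);
 *
 * Again an assumed caller, shown only to make the interface concrete.
 */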

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (is_32bit_task()) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

void __ret_from_kernel_execve(struct pt_regs *normal)
__noreturn;

void ret_from_kernel_execve(struct pt_regs *normal)
{
	set_thread_flag(TIF_RESTOREALL);
	__ret_from_kernel_execve(normal);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
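
/*
 * For illustration, the frame walk above relies on the PowerPC ABI stack
 * layout: word 0 of each frame is the back chain to the caller's frame,
 * and the saved LR lives at index STACK_FRAME_LR_SAVE, so
 *
 *	sp = *(unsigned long *)sp;                        // follow back chain
 *	ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];  // caller's return addr
 *
 * is the minimal unwinder that both get_wchan() and show_stack() below
 * are built on.
 */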

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
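
/*
 * For illustration: get_random_int() & ~PAGE_MASK keeps only the
 * sub-page bits, so with 4K pages the stack top is lowered by a random
 * 0-4095 bytes, and "& ~0xf" then re-aligns the result to the 16-byte
 * boundary the ABI requires.
 */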

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
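
/*
 * Worked example (assuming 4K pages, i.e. PAGE_SHIFT == 12): a 32-bit
 * task draws rnd from [0, 2^11) pages, so after the shift the brk is
 * moved by up to 2^23 bytes = 8MB; a 64-bit task draws from [0, 2^18)
 * pages, i.e. up to 2^30 bytes = 1GB, matching the comment above.
 */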

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}