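/*
 * Architecture-dependent handling of hardware exceptions (traps) for
 * PowerPC: machine checks, program checks, alignment, single-step and
 * debug events, facility-unavailable exceptions, and related oops/die
 * plumbing.
 */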
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <sysdev/fsl_pci.h>

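/*
 * Hook points that an attached kernel debugger (e.g. xmon or kgdb) can
 * install; compiled in when CONFIG_DEBUGGER or CONFIG_KEXEC is set.
 * Each hook returns non-zero if it fully handled the event.
 */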
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

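/*
 * On PowerMacs, force the backlight back on so that oops output is
 * actually visible on a blanked display.
 */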
#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = FB_BLANK_UNBLANK;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

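/*
 * Serialise oops/die output across CPUs.  The owning CPU may nest
 * (tracked by die_nest_count); other CPUs spin on die_lock until it is
 * released in oops_end().
 */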
static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	if (debugger(regs))
		return 1;

	oops_enter();

	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* Nested oops on the owning CPU: we already hold the lock. */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}

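/*
 * Finish an oops: drop the die lock once the last nested oops on this
 * CPU completes, hand the state to fadump/kdump if configured, and
 * either panic or kill the offending task.
 */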
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
			       int signr)
{
	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);

	crash_fadump(regs, "die oops");

	/*
	 * A system reset (trap 0x100) is always a request to dump, so
	 * send it through the crashdump code.
	 */
	if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
		crash_kexec(regs);

		/*
		 * We aren't the primary crash CPU; send this CPU into a
		 * holding pattern so it does not end up in the panic code.
		 */
		crash_kexec_secondary(regs);
	}

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a lock, output from
	 * panic() below can race and corrupt it.  If we know we are going
	 * to panic, delay for one second so the other oopsing CPUs get a
	 * chance to produce clean backtraces first.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	do_exit(signr);
}

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
	printk("NUMA ");
#endif
	printk("%s\n", ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags = oops_begin(regs);

	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
				struct pt_regs *regs, siginfo_t *info)
{
	memset(info, 0, sizeof(*info));
	info->si_signo = SIGTRAP;
	info->si_code = TRAP_TRACE;
	info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	siginfo_t info;
	const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %08lx nip %08lx lr %08lx code %x\n";
	const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
		"at %016lx nip %016lx lr %016lx code %x\n";

	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return;
	}

	if (show_unhandled_signals && unhandled_signal(current, signr)) {
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, signr,
				   addr, regs->nip, regs->link, code);
	}

	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
		local_irq_enable();

	current->thread.trap_nr = code;
	memset(&info, 0, sizeof(info));
	info.si_signo = signr;
	info.si_code = code;
	info.si_addr = (void __user *) addr;
	force_sig_info(signr, &info, current);
}

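/*
 * System reset is delivered as an NMI-like exception on 64-bit.  Give the
 * platform a chance to handle it; otherwise treat it as fatal.
 */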
#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
	/* See if any machine dependent calls can handle it. */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			return;
	}

	die("System Reset", regs, SIGABRT);

	/* Must die if the interrupt is not recoverable. */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable System Reset");

	/* What should we do here? We could issue a shutdown or hard reset. */
}
#endif

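/*
 * I/O accesses can cause machine checks on powermacs.  Check whether the
 * NIP corresponds to the address of a sync instruction for which there is
 * an entry in the exception table (32-bit only).  If so, assume the machine
 * check was caused by an access to a bad port, report it, and resume at
 * the exception-table fixup address.
 */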
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == 0x60000000)		/* nop */
			nip -= 2;
		else if (*nip == 0x4c00012c)	/* isync */
			--nip;
		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
			/* sync or twi */
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100)? "OUT to": "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs->msr |= MSR_RI;
			regs->nip = entry->fixup;
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

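/*
 * Mapping of the exception "reason" bits used below.  BookE parts with the
 * advanced debug registers report the reason in the ESR/MCSR (saved in
 * regs->dsisr), while classic parts encode it in the upper SRR1 bits saved
 * in regs->msr.
 */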
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define get_reason(regs)	((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)	((regs)->dsisr)
#else
#define get_mc_reason(regs)	(mfspr(SPRN_MCSR))
#endif
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR

#define single_stepping(regs)	(current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.dbcr0 &= ~DBCR0_IC)

#else
#define get_reason(regs)	((regs)->msr)
#define get_mc_reason(regs)	((regs)->msr)
#define REASON_TM		0x200000
#define REASON_FP		0x100000
#define REASON_ILLEGAL		0x80000
#define REASON_PRIVILEGED	0x40000
#define REASON_TRAP		0x20000

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & ESR_IMCP) {
		printk("Instruction");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	} else
		printk("Data");
	printk(" machine check in kernel mode.\n");

	return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	if (reason & ESR_IMCP){
		printk("Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
	}
	else {
		u32 mcsr = mfspr(SPRN_MCSR);
		if (mcsr & MCSR_IB)
			printk("Instruction Read PLB Error\n");
		if (mcsr & MCSR_DRB)
			printk("Data Read PLB Error\n");
		if (mcsr & MCSR_DWB)
			printk("Data Write PLB Error\n");
		if (mcsr & MCSR_TLBP)
			printk("TLB Parity Error\n");
		if (mcsr & MCSR_ICP){
			flush_instruction_cache();
			printk("I-Cache Parity Error\n");
		}
		if (mcsr & MCSR_DCSP)
			printk("D-Cache Search Parity Error\n");
		if (mcsr & MCSR_DCFP)
			printk("D-Cache Flush Parity Error\n");
		if (mcsr & MCSR_IMPE)
			printk("Machine Check exception is imprecise\n");

		/* Clear MCSR */
		mtspr(SPRN_MCSR, mcsr);
	}
	return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);
	u32 mcsr;

	printk(KERN_ERR "Machine check in kernel mode.\n");
	if (reason & ESR_IMCP) {
		printk(KERN_ERR
		       "Instruction Synchronous Machine Check exception\n");
		mtspr(SPRN_ESR, reason & ~ESR_IMCP);
		return 0;
	}
	mcsr = mfspr(SPRN_MCSR);
	if (mcsr & MCSR_IB)
		printk(KERN_ERR "Instruction Read PLB Error\n");
	if (mcsr & MCSR_DRB)
		printk(KERN_ERR "Data Read PLB Error\n");
	if (mcsr & MCSR_DWB)
		printk(KERN_ERR "Data Write PLB Error\n");
	if (mcsr & MCSR_TLBP)
		printk(KERN_ERR "TLB Parity Error\n");
	if (mcsr & MCSR_ICP) {
		flush_instruction_cache();
		printk(KERN_ERR "I-Cache Parity Error\n");
	}
	if (mcsr & MCSR_DCSP)
		printk(KERN_ERR "D-Cache Search Parity Error\n");
	if (mcsr & PPC47x_MCSR_GPR)
		printk(KERN_ERR "GPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_FPR)
		printk(KERN_ERR "FPR Parity Error\n");
	if (mcsr & PPC47x_MCSR_IPR)
		printk(KERN_ERR "Machine Check exception is imprecise\n");

	/* Clear MCSR */
	mtspr(SPRN_MCSR, mcsr);

	return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		printk("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		printk("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check.  We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
			recoverable = 0;
	}

	if (reason & MCSR_L2MMU_MHIT) {
		printk("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		printk("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		printk("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		printk("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		printk("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		printk("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		printk("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		printk("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		printk("Machine Check %s Address: %#llx\n",
		       reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		printk("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		printk("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		printk("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		printk("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		printk("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		printk("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		printk("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		printk("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		printk("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		printk("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		printk("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		printk("Machine Check Signal\n");
	if (reason & MCSR_CP_PERR)
		printk("Cache Push Parity Error\n");
	if (reason & MCSR_CPERR)
		printk("Cache Parity Error\n");
	if (reason & MCSR_EXCP_ERR)
		printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
	if (reason & MCSR_BUS_IRERR)
		printk("Bus - Read Bus Error on instruction fetch\n");
	if (reason & MCSR_BUS_DRERR)
		printk("Bus - Read Bus Error on data load\n");
	if (reason & MCSR_BUS_WRERR)
		printk("Bus - Write Bus Error on buffered store or cache line push\n");

	return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = get_mc_reason(regs);

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		printk("Machine check signal\n");
		break;
	case 0:
	case 0x40000:
	case 0x140000:
		printk("Transfer error ack signal\n");
		break;
	case 0x20000:
		printk("Data parity error signal\n");
		break;
	case 0x10000:
		printk("Address parity error signal\n");
		break;
	case 0x20000000:
		printk("L1 Data Cache error\n");
		break;
	case 0x40000000:
		printk("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		printk("L2 data cache parity error\n");
		break;
	default:
		printk("Unknown values in msr\n");
	}
	return 0;
}
#endif

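/*
 * Common machine check entry point.  Give the platform and then the
 * CPU-specific handler a chance to recover; if neither can, fall back
 * to the debugger hook, the I/O fixup, and finally die().
 */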
void machine_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int recover = 0;

	__get_cpu_var(irq_stat).mce_exceptions++;

	/*
	 * See if the platform is capable of handling the exception;
	 * otherwise fall back on the CPU-specific handler.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
	/*
	 * On 8xx with PCI, treat the machine check as a bus error on the
	 * faulting data address.
	 */
	bad_page_fault(regs, regs->dar, SIGBUS);
	goto bail;
#endif

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die("Machine check", regs, SIGBUS);

	/* Must die if the interrupt is not recoverable. */
	if (!(regs->msr & MSR_RI))
		panic("Unrecoverable Machine check");

bail:
	exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
	die("System Management Interrupt", regs, SIGABRT);
}

void unknown_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, 0, 0);

	exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_iabr_match(regs))
		goto bail;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
	exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
	_exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	clear_single_step(regs);

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
		       5, SIGTRAP) == NOTIFY_STOP)
		goto bail;
	if (debugger_sstep(regs))
		goto bail;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
	exception_exit(prev_state);
}

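/*
 * After we have successfully emulated an instruction, we have to check
 * whether the instruction was being single-stepped, and if so, pretend
 * we got a single-step exception.
 */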
static void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = 0;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

	code = __parse_fpscr(current->thread.fpscr.val);

	_exception(SIGFPE, regs, code, regs->nip);
}

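/*
 * Illegal instruction emulation support.  Originally written to provide
 * the PVR to user applications using mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of bits
 * is faster and easier.
 */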
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg, zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra,rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/*
	 * If the task was in a transaction when it hit the emulated
	 * instruction, abort the transaction with the supplied cause
	 * before we do anything on its behalf.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

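/*
 * Emulate a handful of user-visible instructions that may be missing or
 * disabled on this CPU: mfspr of the PVR, dcba, mcrxr, the load/store
 * string instructions, popcntb, isel, and (on ppc64) DSCR accesses.
 * Returns 0 on success, -EFAULT on a faulting user access, and -EINVAL
 * if the instruction is not one we emulate.
 */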
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;
	extern int do_mathemu(struct pt_regs *regs);

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fpscr.val);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

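/*
 * Program check: sort out why we got here (FP exception, trap
 * instruction, TM bad thing, or an illegal/privileged instruction),
 * try the in-kernel emulators, and fall back to SIGILL.
 */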
void __kprobes program_check_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU, in that case the reason flags will be 0 */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		goto bail;
	}
	if (reason & REASON_TRAP) {
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			goto bail;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			goto bail;

		if (!(regs->msr & MSR_PR) &&	/* not user-mode */
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		goto bail;
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/*
		 * This is a TM "Bad Thing" program check: an illegal
		 * MSR[TS] transition or misuse of the TM instructions
		 * or SPRs.
		 */
		if (!user_mode(regs) &&
		    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
			regs->nip += 4;
			goto bail;
		}
		/*
		 * If userspace caused this, it has done something illegal
		 * and gets a SIGILL with ILL_ILLOPN.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			goto bail;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%x)\n", regs->nip, reason);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel, skip down to sending
	 * a SIGILL.  The subsequent cases all relate to emulating
	 * instructions, which we should only do for userspace, and we do
	 * not want to enable interrupts for kernel faults.
	 */
	if (!user_mode(regs))
		goto sigill;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/*
	 * Try the FP emulator unconditionally: on some 4xx chips the ESR
	 * reason bits for FP instructions are not reliable, so we cannot
	 * simply gate this on REASON_ILLEGAL.
	 */
	if (!emulate_math(regs))
		goto bail;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs->nip += 4;
			emulate_single_step(regs);
			goto bail;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			goto bail;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
	exception_exit(prev_state);
}

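/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */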
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}

void trace_syscall(struct pt_regs *regs)
{
	printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
	       current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
	       regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	if (user_mode(regs)) {
		/* A user program has executed an altivec instruction,
		   but this kernel doesn't support altivec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		goto bail;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
	exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
	if (user_mode(regs)) {
		/* A user program has executed a vsx instruction,
		   but this kernel doesn't support vsx. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
	};
	char *facility = "unknown";
	u64 value;
	u8 status;
	bool hv;

	hv = (regs->trap == 0xf80);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if (status == FSCR_DSCR_LG) {
		/*
		 * The user is accessing the DSCR.  Set the inherit bit and
		 * allow direct access in future by setting the FSCR DSCR bit.
		 */
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_FSCR, value | FSCR_DSCR);
		return;
	}

	if ((status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
	       hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

extern void do_load_up_fpu(struct pt_regs *regs);

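/*
 * FP unavailable taken while a transaction is active: reclaim the
 * transactional state, turn FP on in the checkpointed MSR, and
 * recheckpoint so the transaction can continue with FP available.
 */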
void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();

	/*
	 * Reclaim the transactional checkpointed state; the abort cause is
	 * "facility unavailable".
	 */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/*
	 * Recheckpoint so the transaction restarts with the FP state
	 * now present.
	 */
	tm_recheckpoint(&current->thread, regs->msr);
}

#ifdef CONFIG_ALTIVEC
extern void do_load_up_altivec(struct pt_regs *regs);

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/*
	 * See the comment in fp_unavailable_tm().  This works similarly,
	 * though we are loading the vector registers instead.
	 */
	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vr = 1;
}
#endif

#ifdef CONFIG_VSX
void vsx_unavailable_tm(struct pt_regs *regs)
{
	/*
	 * See the comment in fp_unavailable_tm().  This works similarly,
	 * though we enable both FP and VEC since VSX needs them both.
	 */
	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	tm_enable();

	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vsr = 1;
}
#endif
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

	if (!emulate_math(regs))
		return;

	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be
	 * turned back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.dbsr = debug_status;

	/*
	 * On BookE, Branch Taken stops on the branch itself, while
	 * single-stepping stops on the following instruction, so emulate
	 * single-stepping of a taken branch by turning off Branch Taken
	 * debug events and turning on Instruction Complete for one
	 * instruction.
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable the branch-taken event */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the branch-taken event in the DBSR */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/*
	 * We treat cache locking instructions from the user
	 * as priv ops; in the future we could try to do
	 * something smarter.
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	return;
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
		code = FPE_FLTOVF;
	}
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
		code = FPE_FLTUND;
	}
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
		code = FPE_FLTINV;
	}
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}

	return;
}

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default (weak) handler for a Book-E watchdog exception: simply
 * disable the watchdog interrupt so it does not fire again.
 * Platforms can override this with their own WatchdogHandler().
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler; implement your own. */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
	return;
}

void WatchdogException(struct pt_regs *regs)
{
	printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */