/*
 * MIPS trap and exception handling.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

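/*
 * Board/platform hooks: these function pointers are filled in by platform
 * setup code to override or extend the generic handlers below.
 */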
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#else
/*
 * Without kallsyms the unwinder cannot find function boundaries, so always
 * fall back to the raw stack dump.
 */
#define raw_show_trace 1
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("       ");
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif
		} else {
			prepare_frametrace(&regs);
		}
	}

	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
			break;
		}
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
	pr_cont("\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

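/*
 * Field masks used below to decode instructions when emulating them in
 * software (ll/sc, rdhwr, sync and their microMIPS equivalents).
 */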
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

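/*
 * LL/SC emulation state.  ll_bit records a pending LL; the low-level context
 * switch code clears it, so an SC issued by a different task will fail as
 * the architecture requires.
 */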
unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

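	/*
	 * Decode the LL instruction that just caused the RI exception:
	 * sign-extend the 16-bit offset and add it to the base register
	 * to obtain the referenced virtual address.
	 */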
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

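/*
 * ll and sc occupy opcodes that are supposed to raise exceptions when
 * executed on ll/sc-less processors; some such CPUs raise a reserved
 * instruction exception instead of coprocessor unusable, which is why the
 * emulation below is reachable from both do_ri and do_cpu.
 */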
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;
}

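/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */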
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

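/*
 * Send SIGFPE according to the FCSR Cause bits, which must have already
 * been masked against the Enable bits.  Inexact can be raised together
 * with Overflow or Underflow, hence the priority order of the checks below.
 */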
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };

	if (fcr31 & FPU_CSR_INV_X)
		si.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si.si_code = FPE_FLTRES;

	force_sig_info(SIGFPE, &si, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	lose_fpu(1);

	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	own_fpu(1);

	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		lose_fpu(1);

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		own_fpu(1);
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

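	/*
	 * Certain break codes carry arithmetic semantics: overflow and
	 * divide-by-zero are reported as SIGFPE with a matching si_code
	 * rather than as a plain SIGTRAP.
	 */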
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			bcode = instr[0] & 0xf;
		} else {
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

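	/*
	 * The break instruction's 20-bit code field historically had its
	 * low half placed at bit 16 by some assemblers instead of bit 6.
	 * Normalize such codes by swapping the two 10-bit halves so the
	 * checks below see canonical values.
	 */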
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];

		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;

		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

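	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */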
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif
}

static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode of this task is being switched, which may alter the
	 * format of its saved FP context.  It is unsafe to proceed until
	 * the switch completes, so schedule some other task instead.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

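	/*
	 * This task has formerly used the FP context.
	 *
	 * If it has no live MSA vector context then simply restore the
	 * scalar FP context.  If it has (or may have) live MSA context then
	 * the full vector context must be restored instead, even when only
	 * executing scalar FP instructions, since the scalar FP registers
	 * alias the lower 64 bits of the MSA vector registers.
	 */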
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * MSA context must be used with Status.FR == 1, so take ownership
	 * of the FPU without restoring the scalar context yet.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time the task uses MSA but it previously used
	 * scalar FP in this time slice, the live hardware FP registers hold
	 * valid context: only the MSA upper halves need initializing.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64 bits of each vector
		 * register from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so initialize the
		 * upper 64 bits of each vector register such that it cannot
		 * see data left behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the full vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register. */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
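		/*
		 * The COP3 opcode space is used for COP1X instructions.  On
		 * cores with a hardware FPU, or whose ISA lacks COP1X, a CU3
		 * exception can therefore only be an illegal instruction;
		 * otherwise fall through and attempt FP emulation below.
		 */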
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts. */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;

	prev_state = exception_enter();

	/* Clear WP (bit 22) of the cause register so we don't loop forever. */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save their
	 * values and send SIGTRAP; otherwise another thread is the target
	 * and the registers are simply cleared.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer).
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most likely
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

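/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they do it
 * in different ways, handled case by case below.
 */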
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * On CM3 systems the L1 & L2 parity enables must be set to
		 * the same value.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC is not supported;
			 * disable both.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}

	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC).  EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

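/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */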
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
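	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address.  All other handlers are on an odd address and
	 * require no modification.  Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions.  That
	 * would be bad... since we must stay in microMIPS mode.
	 */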
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt
		 * exit.
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * This handler does not fit in the allotted space;
			 * panic rather than silently corrupt the next
			 * vector.
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler.  It
		 * is the handler's responsibility to save registers if
		 * required (e.g. hi/lo) and return from the exception using
		 * "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

static void configure_status(void)
{
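	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */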
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE
		 * register, so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu) {
			/* If available, use WG to set top bits of EBASE */
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
				write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
			}
			write_c0_ebase(ebase);
		}

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup is done in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception, which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;

		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);

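		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of
		 * physical, in which case we always use uncached or KSeg0
		 * addresses anyway, but let's warn if the allocation lands
		 * elsewhere.  EVA is skipped because it may rearrange the
		 * kernel segments.
		 */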
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6) {
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				ebase = (read_c0_ebase_64() & ~0xfff);
#else
				ebase = (read_c0_ebase() & ~0xfff);
#endif
			} else {
				ebase += (read_c0_ebase() & 0x3ffff000);
			}
		}
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do it
	 * in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON3)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);