#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-cm.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

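/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */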
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("       ");
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
		} else {
			prepare_frametrace(&regs);
		}
	}

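	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to
	 * access the stack in the kernel (not user) address space.
	 */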
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for (i = -3; i < 6; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
			break;
		}
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
	pr_cont("\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

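/*
 * ll/sc, rdhwr, sync emulation
 */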
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000

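/*
 * The ll_bit is cleared by r*_switch.S
 */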
unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

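	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */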
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

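/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulating thing in both exception handlers.
 */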
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;
}

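/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */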
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

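/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This is important as Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */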
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	struct siginfo si = { .si_addr = fault_addr, .si_signo = SIGFPE };

	if (fcr31 & FPU_CSR_INV_X)
		si.si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si.si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si.si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si.si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si.si_code = FPE_FLTRES;
	else
		si.si_code = __SI_FAULT;
	force_sig_info(SIGFPE, &si, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

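	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */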
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction from the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

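	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */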
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

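	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */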
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

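/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold amount of instructions, force migration to
 * a "CPU" that has FP support.
 */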
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has already
		 * restricted the allowed set to exclude any CPUs with FPUs,
		 * we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
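	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */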
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

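	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction: restoring only the aliased FP registers now would
	 * zero the upper 64 bits of the vector registers with no way to
	 * detect and repair that later.
	 */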
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
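		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * onwards the opcode space has been reallocated to support
		 * vendor-specific operations, so only fall through to the
		 * FPU emulation path where that reuse may apply.
		 */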
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;

	prev_state = exception_enter();

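	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */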
	clear_c0_cause(CAUSEF_WP);

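	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */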
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT_MSK) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN_MSK;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

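/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */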
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
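	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */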
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * This handler does not fit the VECTORSPACING
			 * Sigh...
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

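	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */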
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE
		 * register, so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu) {
			/* If available, use WG to set top bits of EBASE */
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
				write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
			}
			write_c0_ebase(ebase);
		}

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;

		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);

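		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of
		 * physical, in which case we can convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be
		 * rearranged and to become uncached during cache error
		 * handling.
		 */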
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6) {
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				ebase = (read_c0_ebase_64() & ~0xfff);
#else
				ebase = (read_c0_ebase() & ~0xfff);
#endif
			} else {
				ebase += (read_c0_ebase() & 0x3ffff000);
			}
		}
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but do
	 * it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON3)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
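		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 */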
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);