/*
 * Trap, exception and bus error handling for the MIPS architecture.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/memblock.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/isa-rev.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/tlbex.h>
#include <asm/uasm.h>

#include <asm/mach-loongson64/cpucfg-emul.h>

#include "access-helper.h"

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_gsexc(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
{
	board_be_handler = handler;
}
EXPORT_SYMBOL_GPL(mips_set_be_handler);

static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
			       bool user)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp, loglvl, user);
		return;
	}
	printk("%sCall Trace:\n", loglvl);
	do {
		print_ip_sym(loglvl, pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long stackdata;
	int i;
	unsigned long *sp = (unsigned long *)regs->regs[29];

	printk("%sStack :", loglvl);
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct pt_regs regs;

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
		} else {
			prepare_frametrace(&regs);
		}
	}
	show_stacktrace(task, &regs, loglvl, false);
}

static void show_code(void *pc, bool user)
{
	long i;
	unsigned short *pc16 = NULL;

	printk("Code:");

	/* An odd PC means MIPS16e/microMIPS code: dump 16-bit units. */
	if ((unsigned long)pc & 1)
		pc16 = (u16 *)((unsigned long)pc & ~1);

	for (i = -3; i < 6; i++) {
		if (pc16) {
			u16 insn16;

			if (__get_inst16(&insn16, pc16 + i, user))
				goto bad_address;

			pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
		} else {
			u32 insn32;

			if (__get_inst32(&insn32, (u32 *)pc + i, user))
				goto bad_address;

			pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
		}
	}
	pr_cont("\n");
	return;

bad_address:
	pr_cont(" (Bad address in epc)\n\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	if (MIPS_ISA_REV < 6) {
		printk("Hi    : %0*lx\n", field, regs->hi);
		printk("Lo    : %0*lx\n", field, regs->lo);
	}

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x	", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->cp0_epc, user_mode(regs));
	printk("\n");
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/* XXX For now.	 Fixme, this searches the wrong table ...  */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC	0xfc00ffff
#define MM_RDHWR	0x00006b3c
#define MM_RS		0x001f0000
#define MM_RT		0x03e00000

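/*
 * The ll_bit is cleared by r*_switch.S
 */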
unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}
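/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */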
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ...  */
}
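/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */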
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* SYNCI step size */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* Cycle counter */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* Counter resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* UserLocal (TLS) register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ...  */
}
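/*
 * Loongson-3 CSR instruction emulation: synthesize reads of the CPUCFG
 * register, which is encoded in the LWC2 opcode space.
 */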
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION

#define LWC2             0xc8000000
#define RS               BASE
#define CSR_OPCODE2      0x00000118
#define CSR_OPCODE2_MASK 0x000007ff
#define CSR_FUNC_MASK    RT
#define CSR_FUNC_CPUCFG  0x8

static int simulate_loongson3_cpucfg(struct pt_regs *regs,
				     unsigned int opcode)
{
	int op = opcode & OPCODE;
	int op2 = opcode & CSR_OPCODE2_MASK;
	int csr_func = (opcode & CSR_FUNC_MASK) >> 16;

	if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
		int rd = (opcode & RD) >> 11;
		int rs = (opcode & RS) >> 21;
		__u64 sel = regs->regs[rs];

		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);

		/* Do not emulate on unsupported core models. */
		preempt_disable();
		if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
			preempt_enable();
			return -1;
		}
		regs->regs[rd] = loongson3_cpucfg_read_synthesized(
			&current_cpu_data, sel);
		preempt_enable();
		return 0;
	}

	/* Not ours.  */
	return -1;
}
#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
	exception_exit(prev_state);
}

#ifdef CONFIG_MIPS_FP_SUPPORT

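/*
 * Send SIGFPE according to FCSR Cause bits, which must have already
 * been masked against Enable bits.  This matters because Inexact can
 * happen together with Overflow or Underflow, and `ptrace' can set
 * any bits.
 */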
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcr31 & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}
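/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads.  If we've
 * emulated more than some threshold amount of FP operations, force migration
 * to a "CPU" that has FP support.
 */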
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has
		 * already restricted the allowed set to exclude any CPUs
		 * with FPUs, we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_mask;
			cpumask_and(&tmask, &current->cpus_mask,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	return -1;
}

#endif /* !CONFIG_MIPS_FP_SUPPORT */

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction in the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL);
		} else {
			force_sig(SIGTRAP);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	bool user = user_mode(regs);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_inst16(&instr[0], (u16 *)epc, user))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	bool user = user_mode(regs);
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
		    __get_inst16(&instr[1], (u16 *)(epc + 2), user))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);

#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
		if (status < 0)
			status = simulate_loongson3_cpucfg(regs, opcode);
#endif
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status);
	}

out:
	exception_exit(prev_state);
}
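/*
 * No lock; only written during early bootup by CPU 0.
 */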
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL);

	return NOTIFY_OK;
}

#ifdef CONFIG_MIPS_FP_SUPPORT

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;

	/* Initialize context if it hasn't been used already */
	first_fp = init_fp_ctx(current);

	if (first_fp) {
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			/*
			 * with MSA enabled, userspace can see MSACSR
			 * and MSA regs, but the values in them are from
			 * another task before current task, restore them
			 * from saved fp/msa context
			 */
			write_msa_csr(current->thread.fpu.msacsr);
			/*
			 * own_fpu_inatomic(1) just restore low 64bit,
			 * fix the high 64bit
			 */
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (that is, it has or may have used MSA since last performing a
	 * function call) then we'll need to restore the vector context. This
	 * applies even if we're currently only executing a scalar FP
	 * instruction. This is because if we were to later execute an MSA
	 * instruction then we'd either have to:
	 *
	 *  - Restore the vector context & clobber any registers modified by
	 *    scalar FP instructions between now & then.
	 *
	 * or
	 *
	 *  - Not restore the vector context & lose the most significant bits
	 *    of all vector registers.
	 *
	 * Neither of those options is acceptable. We cannot restore the least
	 * significant bits of the registers now & only restore the most
	 * significant bits later because the most significant bits of any
	 * vector registers whose aliased FP register is modified now will
	 * have been zeroed. We'd have no way to know that when restoring the
	 * vector context & thus may load an outdated version of the register.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

#else /* !CONFIG_MIPS_FP_SUPPORT */

static int enable_restore_fp_context(int msa)
{
	return SIGILL;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	unsigned int opcode;
	unsigned int cpid;
	int status;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status);
		}

		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  On MIPS IV and later ISAs, however,
		 * COP1X instructions may report CE=3 when the FPU is
		 * disabled.  So if there is no real FPU, let the FP
		 * emulator have a go by falling through; in any other
		 * case this is an illegal instruction.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL);
			break;
		}
		fallthrough;
	case 1: {
		void __user *fault_addr;
		unsigned long fcr31;
		int err, sig;

		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;
	}
#else /* CONFIG_MIPS_FP_SUPPORT */
	case 1:
	case 3:
		force_sig(SIGILL);
		break;
#endif /* CONFIG_MIPS_FP_SUPPORT */

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL);
	exception_exit(prev_state);
}
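/*
 * Called with interrupts disabled.
 */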
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	show_code((void *)regs->cp0_epc, user_mode(regs));

	/*
	 * Some chips may have other causes of machine check (e.g. watch
	 * exceptions)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL);
}


asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

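/*
 * Some MIPS CPUs can enable/disable parity/ECC protection for their caches,
 * but they do it in different, incompatible ways.
 */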
static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON32:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
{
	u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
			LOONGSON_DIAG1_EXCCODE_SHIFT;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	switch (exccode) {
	case 0x08:
		/* Undocumented exception, will trigger on certain
		 * also-undocumented instructions accessible from userspace.
		 * Processor state is not otherwise corrupted, but currently
		 * we don't know how to proceed. Maybe there is some
		 * undocumented control flag to enable the instructions?
		 */
		force_sig(SIGILL);
		break;

	default:
		/* None of the other exceptions, documented or not, have
		 * further details given; none are encountered in the wild
		 * either. Panic in case some of them turn out to be fatal.
		 */
		show_regs(regs);
		panic("Unhandled Loongson exception - GSCause = %08x", diag1);
	}

	exception_exit(prev_state);
}
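/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */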
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
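/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */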
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
	memblock_reserve(addr, size);
}

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. The
	 * corresponding handlers must be properly extended to
	 * support microMIPS ISA instructions respectively.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably already disabled and the exception
			 * can't be handled anyway.
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		/* Patch the handler address into the lui/ori pair. */
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

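/*
 * Timer interrupt
 */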
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit is
	 * for Watch exceptions.  On the R4000 watch exceptions are
	 * left active in case a process wants them.
	 */
	unsigned int status_set = ST0_KERNEL_CUMASK;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
	back_to_back_c0_hazard();
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_mips_r2_r6) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
	}
	if (cpu_has_veic || cpu_has_vint) {
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (cpu_has_mmid)
		cpu_data[cpu].asid_cache = 0;
	else if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
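/* Install CPU exception handler */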
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";
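/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */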
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i, vec_size;
	phys_addr_t ebase_pa;

	check_wait();

	if (!cpu_has_mips_r2_r6) {
		ebase = CAC_BASE;
		vec_size = 0x400;
	} else {
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING*64;
		else
			vec_size = PAGE_SIZE;

		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
		if (!ebase_pa)
			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
			      __func__, vec_size, 1 << fls(vec_size));

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of
		 * physical RAM anyway, so the condition below should rarely
		 * trigger.  EVA configurations may rearrange segments, so
		 * fall back to phys_to_virt() in that case.
		 */
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
		else
			ebase = (unsigned long)phys_to_virt(ebase_pa);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);
	memblock_set_bottom_up(false);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON64)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	if (cpu_has_ftlbparex)
		set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_gsexcex)
		set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + vec_size);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);