1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/bitops.h>
16#include <linux/bug.h>
17#include <linux/compiler.h>
18#include <linux/context_tracking.h>
19#include <linux/cpu_pm.h>
20#include <linux/kexec.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/extable.h>
25#include <linux/mm.h>
26#include <linux/sched/mm.h>
27#include <linux/sched/debug.h>
28#include <linux/smp.h>
29#include <linux/spinlock.h>
30#include <linux/kallsyms.h>
31#include <linux/memblock.h>
32#include <linux/interrupt.h>
33#include <linux/ptrace.h>
34#include <linux/kgdb.h>
35#include <linux/kdebug.h>
36#include <linux/kprobes.h>
37#include <linux/notifier.h>
38#include <linux/kdb.h>
39#include <linux/irq.h>
40#include <linux/perf_event.h>
41
42#include <asm/addrspace.h>
43#include <asm/bootinfo.h>
44#include <asm/branch.h>
45#include <asm/break.h>
46#include <asm/cop2.h>
47#include <asm/cpu.h>
48#include <asm/cpu-type.h>
49#include <asm/dsp.h>
50#include <asm/fpu.h>
51#include <asm/fpu_emulator.h>
52#include <asm/idle.h>
53#include <asm/isa-rev.h>
54#include <asm/mips-cps.h>
55#include <asm/mips-r2-to-r6-emul.h>
56#include <asm/mipsregs.h>
57#include <asm/mipsmtregs.h>
58#include <asm/module.h>
59#include <asm/msa.h>
60#include <asm/ptrace.h>
61#include <asm/sections.h>
62#include <asm/siginfo.h>
63#include <asm/tlbdebug.h>
64#include <asm/traps.h>
65#include <linux/uaccess.h>
66#include <asm/watch.h>
67#include <asm/mmu_context.h>
68#include <asm/types.h>
69#include <asm/stacktrace.h>
70#include <asm/tlbex.h>
71#include <asm/uasm.h>
72
73#include <asm/mach-loongson64/cpucfg-emul.h>
74
75#include "access-helper.h"
76
77extern void check_wait(void);
78extern asmlinkage void rollback_handle_int(void);
79extern asmlinkage void handle_int(void);
80extern asmlinkage void handle_adel(void);
81extern asmlinkage void handle_ades(void);
82extern asmlinkage void handle_ibe(void);
83extern asmlinkage void handle_dbe(void);
84extern asmlinkage void handle_sys(void);
85extern asmlinkage void handle_bp(void);
86extern asmlinkage void handle_ri(void);
87extern asmlinkage void handle_ri_rdhwr_tlbp(void);
88extern asmlinkage void handle_ri_rdhwr(void);
89extern asmlinkage void handle_cpu(void);
90extern asmlinkage void handle_ov(void);
91extern asmlinkage void handle_tr(void);
92extern asmlinkage void handle_msa_fpe(void);
93extern asmlinkage void handle_fpe(void);
94extern asmlinkage void handle_ftlb(void);
95extern asmlinkage void handle_gsexc(void);
96extern asmlinkage void handle_msa(void);
97extern asmlinkage void handle_mdmx(void);
98extern asmlinkage void handle_watch(void);
99extern asmlinkage void handle_mt(void);
100extern asmlinkage void handle_dsp(void);
101extern asmlinkage void handle_mcheck(void);
102extern asmlinkage void handle_reserved(void);
103extern void tlb_do_page_fault_0(void);
104
/*
 * Board/platform-specific exception hooks.  Platform setup code may
 * install these; they default to NULL (unused).
 */
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
112
/*
 * Dump a raw backtrace: scan the stack upward from reg29 ($sp) and print
 * every word that points into kernel text.  Used when proper unwinding is
 * not possible (see show_backtrace()).
 */
static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
			       bool user)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);	/* word-align */
	unsigned long addr;

	printk("%sCall Trace:", loglvl);
#ifdef CONFIG_KALLSYMS
	/* With kallsyms each resolved symbol is printed on its own line. */
	printk("%s\n", loglvl);
#endif
	while (!kstack_end(sp)) {
		if (__get_addr(&addr, sp++, user)) {
			printk("%s (Bad stack address)", loglvl);
			break;
		}
		/* Only print words that could be return addresses. */
		if (__kernel_text_address(addr))
			print_ip_sym(loglvl, addr);
	}
	printk("%s\n", loglvl);
}
133
#ifdef CONFIG_KALLSYMS
/*
 * "raw_show_trace" boot parameter: force raw stack scans instead of the
 * unwinder-based backtrace in show_backtrace().
 */
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
143
/*
 * Print a call trace for @task starting at the context in @regs.  Uses the
 * proper unwinder when possible; falls back to a raw stack scan when the
 * trace starts outside kernel text, in user mode, or when raw_show_trace
 * was requested on the command line.
 */
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
			   const char *loglvl, bool user)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp, loglvl, user);
		return;
	}
	printk("%sCall Trace:\n", loglvl);
	do {
		print_ip_sym(loglvl, pc);
		/* unwind_stack() returns 0 when no further frame is found */
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}
165
166
167
168
169
/*
 * Hex-dump the stack contents from $sp up to the next page boundary
 * (at most 40 words), then print a backtrace.
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs, const char *loglvl, bool user)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits per word */
	unsigned long stackdata;
	int i;
	unsigned long *sp = (unsigned long *)regs->regs[29];

	printk("%sStack :", loglvl);
	i = 0;
	/* Stop at the page boundary — the stack does not cross it here. */
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("%s        ", loglvl);
		}
		if (i > 39) {
			/* Cap the dump at 40 words. */
			pr_cont(" ...");
			break;
		}

		if (__get_addr(&stackdata, sp++, user)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs, loglvl, user);
}
201
202void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
203{
204 struct pt_regs regs;
205
206 regs.cp0_status = KSU_KERNEL;
207 if (sp) {
208 regs.regs[29] = (unsigned long)sp;
209 regs.regs[31] = 0;
210 regs.cp0_epc = 0;
211 } else {
212 if (task && task != current) {
213 regs.regs[29] = task->thread.reg29;
214 regs.regs[31] = 0;
215 regs.cp0_epc = task->thread.reg31;
216 } else {
217 prepare_frametrace(®s);
218 }
219 }
220 show_stacktrace(task, ®s, loglvl, false);
221}
222
/*
 * Dump the instruction words around @pc, marking the faulting one with
 * angle brackets.  A set low bit in @pc indicates 16-bit ISA mode
 * (MIPS16e/microMIPS), in which case 16-bit units are fetched.
 */
static void show_code(void *pc, bool user)
{
	long i;
	unsigned short *pc16 = NULL;

	printk("Code:");

	/* Low bit set => 16-bit ISA; strip it to get the real address. */
	if ((unsigned long)pc & 1)
		pc16 = (u16 *)((unsigned long)pc & ~1);

	for(i = -3 ; i < 6 ; i++) {
		if (pc16) {
			u16 insn16;

			if (__get_inst16(&insn16, pc16 + i, user))
				goto bad_address;

			/* i == 0 is the faulting instruction. */
			pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
		} else {
			u32 insn32;

			if (__get_inst32(&insn32, (u32 *)pc + i, user))
				goto bad_address;

			pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
		}
	}
	pr_cont("\n");
	return;

bad_address:
	pr_cont(" (Bad address in epc)\n\n");
}
256
/*
 * Print the full register state from @regs: GPRs, hi/lo (pre-R6), epc/ra,
 * decoded Status bits, Cause/ExcCode, BadVAddr where relevant, and PrId.
 */
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits per reg */
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers, four per line.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d :", i);
		if (i == 0)
			/* $0 is hardwired to zero. */
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			/* $26/$27 (k0/k1) are kernel scratch — left blank. */
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx : %0*lx\n", field, regs->acx);
#endif
	/* hi/lo were removed from the base ISA in release 6. */
	if (MIPS_ISA_REV < 6) {
		printk("Hi : %0*lx\n", field, regs->hi);
		printk("Lo : %0*lx\n", field, regs->lo);
	}

	/*
	 * Saved cp0 registers.
	 */
	printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		/* R3000-style Status: KU/IE current/previous/old stack. */
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		/* R4000-style Status: address-space enables + mode + EXL/ERL. */
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	/* ExcCodes 1..5 are TLB/address errors, for which BadVAddr is valid. */
	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
354
355
356
357
/*
 * Generic show_regs() entry point: print the register state, then the
 * current stack trace.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_stack();
}
363
/*
 * Full oops-style dump: registers, loaded modules, process identity/TLS,
 * stack contents with backtrace, and the code around epc.
 */
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		/* Flag a mismatch between the HW UserLocal and our record. */
		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
	show_code((void *)regs->cp0_epc, user_mode(regs));
	printk("\n");
}
385
/* Serializes concurrent oopses so their output doesn't interleave. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Terminal error path: print an oops, taint the kernel, and kill the
 * current task (or panic, depending on context and panic_on_oops).
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	/* A die notifier may claim the event; then no signal is raised. */
	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	/* Give kdump a chance to capture a crash dump. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}
421
/* Bus-error fixup table, populated by __BUILD_verbose exception users. */
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

/* Ensure the __dbe_table section exists even when nothing contributes. */
__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/*
 * Look up a data-bus-error fixup for @addr, first in the core kernel's
 * table and then in module tables.  Returns NULL if none exists.
 */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
440
/*
 * Bus Error exception handler.  Cause bit 2 distinguishes data (set)
 * from instruction (clear) bus errors.  Tries, in order: a __dbe_table
 * fixup, the board handler, and finally SIGBUS / oops.
 */
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;		/* data (vs instruction) BE */
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	/* Fixups only exist for kernel-mode data accesses. */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	/* The board handler gets the final say if one is installed. */
	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			/* Resume at the fixup's continuation address. */
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Fatal: report, let notifiers have a look, then oops or SIGBUS.
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS);

out:
	exception_exit(prev_state);
}
491
492
493
494
495
/*
 * Instruction field masks used by the ll/sc, rdhwr and sync emulation
 * below (classic 32-bit MIPS encodings).
 */
#define OPCODE 0xfc000000		/* major opcode field */
#define BASE   0x03e00000		/* base register (rs) */
#define RT     0x001f0000
#define OFFSET 0x0000ffff		/* signed 16-bit offset */
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f		/* function field of SPECIAL ops */
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/* microMIPS POOL32A encodings for RDHWR. */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR 0x00006b3c
#define MM_RS	0x001f0000
#define MM_RT	0x03e00000
514
515
516
517
518
/*
 * Software LL/SC emulation state: ll_bit plays the role of the LLbit,
 * ll_task records which task performed the last LL.
 * NOTE(review): ll_bit is presumably also cleared on context switch by
 * assembly code elsewhere — confirm against the switch path.
 */
unsigned int ll_bit;
struct task_struct *ll_task;

/*
 * Emulate an LL (load-linked) instruction for CPUs without ll/sc.
 * Loads the word, records this task as the link owner, and writes the
 * value into the destination register.  Returns 0 on success, or the
 * signal number to raise (SIGBUS for misalignment, SIGSEGV on fault).
 */
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * Sign-extend the 16-bit offset field.
	 */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	/* LL requires a word-aligned address. */
	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	/* Only grant the link if no other task holds one. */
	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
560
/*
 * Emulate an SC (store-conditional) instruction.  The store succeeds
 * (rt := 1) only if this task still holds the link from a prior LL;
 * otherwise rt := 0 and nothing is stored.  Returns 0 on success, or a
 * signal number (SIGBUS misalignment, SIGSEGV store fault).
 */
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * Sign-extend the 16-bit offset field.
	 */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	/* SC requires a word-aligned address. */
	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		/* Link lost: report failure in rt, store nothing. */
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}
601
602
603
604
605
606
607
608
609static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
610{
611 if ((opcode & OPCODE) == LL) {
612 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
613 1, regs, 0);
614 return simulate_ll(regs, opcode);
615 }
616 if ((opcode & OPCODE) == SC) {
617 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
618 1, regs, 0);
619 return simulate_sc(regs, opcode);
620 }
621
622 return -1;
623}
624
625
626
627
628
/*
 * Emulate a RDHWR instruction: read hardware register @rd into GPR @rt.
 * Returns 0 on success, -1 for an unsupported hardware register.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:		/* $0: CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:	/* $1: SYNCI step size */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:		/* $2: cycle counter */
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:		/* $3: counter resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:		/* $29: UserLocal (TLS pointer) */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
663
664static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
665{
666 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
667 int rd = (opcode & RD) >> 11;
668 int rt = (opcode & RT) >> 16;
669
670 simulate_rdhwr(regs, rd, rt);
671 return 0;
672 }
673
674
675 return -1;
676}
677
678static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
679{
680 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
681 int rd = (opcode & MM_RS) >> 16;
682 int rt = (opcode & MM_RT) >> 21;
683 simulate_rdhwr(regs, rd, rt);
684 return 0;
685 }
686
687
688 return -1;
689}
690
691static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
692{
693 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
694 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
695 1, regs, 0);
696 return 0;
697 }
698
699 return -1;
700}
701
702
703
704
705
#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
/*
 * Field masks for the Loongson-3 CPUCFG instruction, which reuses the
 * LWC2 opcode space.
 */
#define LWC2             0xc8000000
#define RS               BASE
#define CSR_OPCODE2      0x00000118
#define CSR_OPCODE2_MASK 0x000007ff
#define CSR_FUNC_MASK    RT
#define CSR_FUNC_CPUCFG  0x8
715static int simulate_loongson3_cpucfg(struct pt_regs *regs,
716 unsigned int opcode)
717{
718 int op = opcode & OPCODE;
719 int op2 = opcode & CSR_OPCODE2_MASK;
720 int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
721
722 if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
723 int rd = (opcode & RD) >> 11;
724 int rs = (opcode & RS) >> 21;
725 __u64 sel = regs->regs[rs];
726
727 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
728
729
730 preempt_disable();
731 if (!loongson3_cpucfg_emulation_enabled(¤t_cpu_data)) {
732 preempt_enable();
733 return -1;
734 }
735 regs->regs[rd] = loongson3_cpucfg_read_synthesized(
736 ¤t_cpu_data, sel);
737 preempt_enable();
738 return 0;
739 }
740
741
742 return -1;
743}
744#endif
745
/*
 * Integer Overflow exception handler: fatal in the kernel, SIGFPE
 * (FPE_INTOVF) in user space.
 */
asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
	exception_exit(prev_state);
}
756
757#ifdef CONFIG_MIPS_FP_SUPPORT
758
759
760
761
762
763
764
765void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
766 struct task_struct *tsk)
767{
768 int si_code = FPE_FLTUNK;
769
770 if (fcr31 & FPU_CSR_INV_X)
771 si_code = FPE_FLTINV;
772 else if (fcr31 & FPU_CSR_DIV_X)
773 si_code = FPE_FLTDIV;
774 else if (fcr31 & FPU_CSR_OVF_X)
775 si_code = FPE_FLTOVF;
776 else if (fcr31 & FPU_CSR_UDF_X)
777 si_code = FPE_FLTUND;
778 else if (fcr31 & FPU_CSR_INE_X)
779 si_code = FPE_FLTRES;
780
781 force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
782}
783
/*
 * Deliver the signal requested by the FPU emulator, refining si_code
 * where possible.  Returns 0 if no signal was raised, 1 otherwise.
 */
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		/* Derive si_code from the FCSR cause bits. */
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
		return 1;

	case SIGSEGV:
		/* Mapped but inaccessible => ACCERR, unmapped => MAPERR. */
		mmap_read_lock(current->mm);
		if (vma_lookup(current->mm, (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		mmap_read_unlock(current->mm);
		force_sig_fault(SIGSEGV, si_code, fault_addr);
		return 1;

	default:
		force_sig(sig);
		return 1;
	}
}
815
816static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
817 unsigned long old_epc, unsigned long old_ra)
818{
819 union mips_instruction inst = { .word = opcode };
820 void __user *fault_addr;
821 unsigned long fcr31;
822 int sig;
823
824
825 switch (inst.i_format.opcode) {
826 case cop1_op:
827 case cop1x_op:
828 case lwc1_op:
829 case ldc1_op:
830 case swc1_op:
831 case sdc1_op:
832 break;
833
834 default:
835 return -1;
836 }
837
838
839
840
841
842 regs->cp0_epc = old_epc;
843 regs->regs[31] = old_ra;
844
845
846 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
847 &fault_addr);
848
849
850
851
852
853 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
854 current->thread.fpu.fcr31 &= ~fcr31;
855
856
857 own_fpu(1);
858
859
860 process_fpemu_return(sig, fault_addr, fcr31);
861
862 return 0;
863}
864
865
866
867
868asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
869{
870 enum ctx_state prev_state;
871 void __user *fault_addr;
872 int sig;
873
874 prev_state = exception_enter();
875 if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
876 SIGFPE) == NOTIFY_STOP)
877 goto out;
878
879
880 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
881 local_irq_enable();
882
883 die_if_kernel("FP exception in kernel code", regs);
884
885 if (fcr31 & FPU_CSR_UNI_X) {
886
887
888
889
890
891
892
893
894
895
896
897
898 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
899 &fault_addr);
900
901
902
903
904
905 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
906 current->thread.fpu.fcr31 &= ~fcr31;
907
908
909 own_fpu(1);
910 } else {
911 sig = SIGFPE;
912 fault_addr = (void __user *) regs->cp0_epc;
913 }
914
915
916 process_fpemu_return(sig, fault_addr, fcr31);
917
918out:
919 exception_exit(prev_state);
920}
921
922
923
924
925
926
/*
 * On MIPS MT with CONFIG_MIPS_MT_FPAFF: once a task has emulated more
 * than mt_fpemul_threshold FP instructions, restrict it to CPUs that
 * have FPU hardware (if its affinity mask allows any).
 *
 * Fix: "&current" had been corrupted to "¤t" by an HTML-entity
 * mangling pass; restored.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If the affinity mask excludes all FPU-capable CPUs,
		 * there is nothing useful to do.
		 */
		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			/* Remember the original mask for later restore. */
			current->thread.user_cpus_allowed
				= current->cpus_mask;
			cpumask_and(&tmask, &current->cpus_mask,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
950
951#else
952
/*
 * Stub for !CONFIG_MIPS_FP_SUPPORT: never claims the opcode, so do_ri()
 * falls through to SIGILL.
 */
static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	return -1;
}
958
959#endif
960
/*
 * Common handling for break and trap instructions, dispatched on the
 * embedded break/trap @code.  Certain codes carry special meanings
 * (arithmetic faults, BUG(), math-emu dsemul returns).
 */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * Codes with well-defined semantics get specific signals; anything
	 * else is a plain SIGTRAP.
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		/* Arithmetic fault encoded as a break: deliver SIGFPE. */
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP);
		break;
	case BRK_MEMU:
		/*
		 * Used by the math emulator's delay-slot emulation frame;
		 * do_dsemulret() returns nonzero when it consumed the trap.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL);
		} else {
			force_sig(SIGTRAP);
		}
	}
}
1020
/*
 * Breakpoint exception handler.  Decodes the break code from the
 * instruction (classic, MIPS16e or microMIPS encodings), gives the
 * kprobes/uprobes notifiers first refusal on their reserved codes, then
 * falls through to do_trap_or_bp().
 */
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	bool user = user_mode(regs);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_inst16(&instr[0], (u16 *)epc, user))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK: fetch the second half. */
			if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * Normalize: codes above 1023 have their 10-bit halves swapped so
	 * both common assembler encodings of "break code" map to the same
	 * value.
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * Reserved break codes: hand them to the corresponding debug
	 * notifier chains before generic processing.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}
1107
/*
 * Trap exception handler.  Extracts the trap code from the instruction
 * (classic or microMIPS encoding) and hands off to do_trap_or_bp().
 */
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	bool user = user_mode(regs);
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		/* microMIPS: assemble the 32-bit opcode from two halves. */
		if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
		    __get_inst16(&instr[1], (u16 *)(epc + 2), user))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_inst32(&opcode, (u32 *)epc, user))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV);
	goto out;
}
1144
1145asmlinkage void do_ri(struct pt_regs *regs)
1146{
1147 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1148 unsigned long old_epc = regs->cp0_epc;
1149 unsigned long old31 = regs->regs[31];
1150 enum ctx_state prev_state;
1151 unsigned int opcode = 0;
1152 int status = -1;
1153
1154
1155
1156
1157
1158 if (mipsr2_emulation && cpu_has_mips_r6 &&
1159 likely(user_mode(regs)) &&
1160 likely(get_user(opcode, epc) >= 0)) {
1161 unsigned long fcr31 = 0;
1162
1163 status = mipsr2_decoder(regs, opcode, &fcr31);
1164 switch (status) {
1165 case 0:
1166 case SIGEMT:
1167 return;
1168 case SIGILL:
1169 goto no_r2_instr;
1170 default:
1171 process_fpemu_return(status,
1172 ¤t->thread.cp0_baduaddr,
1173 fcr31);
1174 return;
1175 }
1176 }
1177
1178no_r2_instr:
1179
1180 prev_state = exception_enter();
1181 current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1182
1183 if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1184 SIGILL) == NOTIFY_STOP)
1185 goto out;
1186
1187 die_if_kernel("Reserved instruction in kernel code", regs);
1188
1189 if (unlikely(compute_return_epc(regs) < 0))
1190 goto out;
1191
1192 if (!get_isa16_mode(regs->cp0_epc)) {
1193 if (unlikely(get_user(opcode, epc) < 0))
1194 status = SIGSEGV;
1195
1196 if (!cpu_has_llsc && status < 0)
1197 status = simulate_llsc(regs, opcode);
1198
1199 if (status < 0)
1200 status = simulate_rdhwr_normal(regs, opcode);
1201
1202 if (status < 0)
1203 status = simulate_sync(regs, opcode);
1204
1205 if (status < 0)
1206 status = simulate_fp(regs, opcode, old_epc, old31);
1207
1208#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
1209 if (status < 0)
1210 status = simulate_loongson3_cpucfg(regs, opcode);
1211#endif
1212 } else if (cpu_has_mmips) {
1213 unsigned short mmop[2] = { 0 };
1214
1215 if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1216 status = SIGSEGV;
1217 if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1218 status = SIGSEGV;
1219 opcode = mmop[0];
1220 opcode = (opcode << 16) | mmop[1];
1221
1222 if (status < 0)
1223 status = simulate_rdhwr_mm(regs, opcode);
1224 }
1225
1226 if (status < 0)
1227 status = SIGILL;
1228
1229 if (unlikely(status > 0)) {
1230 regs->cp0_epc = old_epc;
1231 regs->regs[31] = old31;
1232 force_sig(status);
1233 }
1234
1235out:
1236 exception_exit(prev_state);
1237}
1238
1239
1240
1241
/*
 * Notifier chain for coprocessor-2 (CU2) exception events; platforms
 * with a COP2 register a handler here.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
1253
/*
 * Fallback CU2 notifier: no platform claimed the COP2 exception, so it
 * is fatal in the kernel and SIGILL in user space.
 */
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL);

	return NOTIFY_OK;
}
1265
1266#ifdef CONFIG_MIPS_FP_SUPPORT
1267
/*
 * Enable the FPU (and, if @msa, the MSA unit) for the current task and
 * restore its saved FP/MSA context into the hardware registers.
 * Returns 0 on success or an error/signal from own_fpu*().
 *
 * The ordering here is delicate: which parts of the context are restored
 * depends on whether this is the task's first FP use, whether it has
 * live MSA state (TIF_MSA_CTX_LIVE), and whether its scalar FP state was
 * already live in the hardware (is_fpu_owner()).
 */
static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;
	bool first_fp;

	/* Initialize the context if it hasn't been used already. */
	first_fp = init_fp_ctx(current);

	if (first_fp) {
		preempt_disable();
		err = own_fpu_inatomic(1);
		if (msa && !err) {
			enable_msa();
			/*
			 * The freshly-initialized MSACSR still has to be
			 * written to the hardware control register.
			 */
			write_msa_csr(current->thread.fpu.msacsr);
			/*
			 * Upper halves of the vector registers start out
			 * in a known (initialized) state.
			 */
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		return err;
	}

	/*
	 * No MSA requested and no live MSA context: a plain scalar FP
	 * restore is sufficient.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * MSA is involved (requested, or context is live).  Take ownership
	 * of the FPU without restoring scalar state yet — what to restore
	 * depends on the MSA context state determined below.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first MSA use and the scalar FP state was already
	 * live in hardware, only the vector upper halves need initializing.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * First MSA use with stale hardware state: restore the
		 * scalar FP registers from memory first...
		 */
		_restore_fp(current);

		/*
		 * ...then put the vector upper halves into their initial
		 * state (after _restore_fp, so it cannot clobber them).
		 */
		init_msa_upper();
	} else {
		/* Live MSA context: restore the full vector state. */
		restore_msa(current);

		/* Restore FCSR too if the FP state wasn't live in HW. */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}
1386
1387#else
1388
/*
 * Stub for !CONFIG_MIPS_FP_SUPPORT: any FP/MSA use becomes SIGILL.
 */
static int enable_restore_fp_context(int msa)
{
	return SIGILL;
}
1393
1394#endif
1395
1396asmlinkage void do_cpu(struct pt_regs *regs)
1397{
1398 enum ctx_state prev_state;
1399 unsigned int __user *epc;
1400 unsigned long old_epc, old31;
1401 unsigned int opcode;
1402 unsigned int cpid;
1403 int status;
1404
1405 prev_state = exception_enter();
1406 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1407
1408 if (cpid != 2)
1409 die_if_kernel("do_cpu invoked from kernel context!", regs);
1410
1411 switch (cpid) {
1412 case 0:
1413 epc = (unsigned int __user *)exception_epc(regs);
1414 old_epc = regs->cp0_epc;
1415 old31 = regs->regs[31];
1416 opcode = 0;
1417 status = -1;
1418
1419 if (unlikely(compute_return_epc(regs) < 0))
1420 break;
1421
1422 if (!get_isa16_mode(regs->cp0_epc)) {
1423 if (unlikely(get_user(opcode, epc) < 0))
1424 status = SIGSEGV;
1425
1426 if (!cpu_has_llsc && status < 0)
1427 status = simulate_llsc(regs, opcode);
1428 }
1429
1430 if (status < 0)
1431 status = SIGILL;
1432
1433 if (unlikely(status > 0)) {
1434 regs->cp0_epc = old_epc;
1435 regs->regs[31] = old31;
1436 force_sig(status);
1437 }
1438
1439 break;
1440
1441#ifdef CONFIG_MIPS_FP_SUPPORT
1442 case 3:
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455 if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1456 force_sig(SIGILL);
1457 break;
1458 }
1459 fallthrough;
1460 case 1: {
1461 void __user *fault_addr;
1462 unsigned long fcr31;
1463 int err, sig;
1464
1465 err = enable_restore_fp_context(0);
1466
1467 if (raw_cpu_has_fpu && !err)
1468 break;
1469
1470 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
1471 &fault_addr);
1472
1473
1474
1475
1476
1477 fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1478 current->thread.fpu.fcr31 &= ~fcr31;
1479
1480
1481 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1482 mt_ase_fp_affinity();
1483
1484 break;
1485 }
1486#else
1487 case 1:
1488 case 3:
1489 force_sig(SIGILL);
1490 break;
1491#endif
1492
1493 case 2:
1494 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1495 break;
1496 }
1497
1498 exception_exit(prev_state);
1499}
1500
/*
 * MSA floating point exception handler.  @msacsr is the MSACSR value
 * captured by the low-level entry code.  Gives debuggers first shot via
 * the die chain, then delivers SIGFPE to the task.
 */
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE);
out:
	exception_exit(prev_state);
}
1520
/*
 * MSA Disabled exception handler: enable MSA and restore the task's
 * vector context, or raise SIGILL when MSA is unavailable or the task
 * runs with 32-bit FP registers (MSA requires 64-bit FR=1 mode).
 */
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL);
out:
	exception_exit(prev_state);
}
1541
1542asmlinkage void do_mdmx(struct pt_regs *regs)
1543{
1544 enum ctx_state prev_state;
1545
1546 prev_state = exception_enter();
1547 force_sig(SIGILL);
1548 exception_exit(prev_state);
1549}
1550
1551
1552
1553
/*
 * Watchpoint exception handler.  Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();

	/*
	 * Clear WP (bit 22) of cause register so we don't loop forever
	 * on a restartable watch exception.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * apparently planted the watchpoint, so just clear the watch
	 * registers and carry on.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}
1580
/*
 * Machine Check exception handler.  Dumps as much state as possible
 * (including the TLB when Status.TS indicates a multiple-match shutdown)
 * and then panics — there is no sane way to recover.
 */
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	show_code((void *)regs->cp0_epc, user_mode(regs));

	/*
	 * Some chips may have other causes of machine check (e.g.
	 * interlock failure).
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
1605
1606asmlinkage void do_mt(struct pt_regs *regs)
1607{
1608 int subcode;
1609
1610 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1611 >> VPECONTROL_EXCPT_SHIFT;
1612 switch (subcode) {
1613 case 0:
1614 printk(KERN_DEBUG "Thread Underflow\n");
1615 break;
1616 case 1:
1617 printk(KERN_DEBUG "Thread Overflow\n");
1618 break;
1619 case 2:
1620 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1621 break;
1622 case 3:
1623 printk(KERN_DEBUG "Gating Storage Exception\n");
1624 break;
1625 case 4:
1626 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1627 break;
1628 case 5:
1629 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1630 break;
1631 default:
1632 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1633 subcode);
1634 break;
1635 }
1636 die_if_kernel("MIPS MT Thread exception in kernel", regs);
1637
1638 force_sig(SIGILL);
1639}
1640
1641
1642asmlinkage void do_dsp(struct pt_regs *regs)
1643{
1644 if (cpu_has_dsp)
1645 panic("Unexpected DSP exception");
1646
1647 force_sig(SIGILL);
1648}
1649
/*
 * Handler for exceptions whose ExcCode is reserved or for which no
 * specific handler was installed.  Game over — dump state and panic.
 */
asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * The ExcCode field (Cause bits 6..2) identifies which reserved
	 * exception fired.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}
1661
/* L1 cache parity protection enabled by default; "nol1par" disables it. */
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
/* L2 cache parity protection enabled by default; "nol2par" disables it. */
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);
1676
1677
1678
1679
1680
/*
 * Probe for and enable L1/L2 cache parity (ECC) protection where the
 * CPU supports it, honouring the "nol1par"/"nol2par" command line
 * options.  Register write sequencing and hazards here are deliberate —
 * do not reorder.
 */
static inline __init void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems L1 and L2 parity checking is kept in
		 * lock-step: if either is disabled (by the user or for
		 * lack of support), both end up disabled.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support: try to set ErrCtl.PE and read back */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support via the CM GCR error control register */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			/*
			 * On these cores the L2P bit inverts its meaning
			 * when L1 parity is enabled, hence the XOR —
			 * NOTE(review): this matches the readback fixup
			 * below; confirm against the core's ErrCtl spec.
			 */
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON32:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
1825
/*
 * Cache error exception handler: decode and report c0_cacheerr, then
 * panic — cache errors are not recoverable here.
 */
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		/* R2/R6 MIPS Technologies cores define the extra ES bit. */
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
1873
/*
 * FTLB (fixed-page-size TLB) error exception handler.  Reports what the
 * error registers say, then falls through to cache_parity_error(),
 * which panics.
 */
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}
1902
/*
 * Loongson GSExc exception handler.  @diag1 holds the Loongson
 * diagnostic register from which the sub-exception code is extracted.
 */
asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
{
	u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
			LOONGSON_DIAG1_EXCCODE_SHIFT;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	switch (exccode) {
	case 0x08:
		/*
		 * Undocumented GSExc sub-code 0x08 — treated as an
		 * illegal instruction in the faulting task.
		 * NOTE(review): semantics of this sub-code are not
		 * visible here; confirm against Loongson documentation.
		 */
		force_sig(SIGILL);
		break;

	default:
		/* Anything else is fatal: we have no idea what happened. */
		show_regs(regs);
		panic("Unhandled Loongson exception - GSCause = %08x", diag1);
	}

	exception_exit(prev_state);
}
1933
1934
1935
1936
1937
/*
 * SDBBP EJTAG debug exception handler.  We don't handle these yet:
 * just skip past the offending SDBBP instruction so execution resumes.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot (Debug.DBD set).
		 * Reuse compute_return_epc() by temporarily pointing
		 * cp0_epc at DEPC, computing the branch target, then
		 * restoring the saved EPC/ra so the trap frame is
		 * untouched.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;	/* Not in a delay slot: just step over SDBBP. */
	write_c0_depc(depc);

#if 0
	/* Debug aid: re-enter debug mode after every instruction. */
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
1971
1972
1973
1974
1975
/*
 * NMI notifier chain: platforms register callbacks that run on NMI
 * entry, before the kernel dies.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}
1982
/*
 * Non-maskable interrupt handler: notify registered listeners, then
 * die.  Never returns (die() does not come back; nmi_exit() is
 * unreachable but kept for symmetry with nmi_enter()).
 */
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	/* Format the message before cp0_epc is overwritten below. */
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	/* Report ErrorEPC (the NMI return address) as the faulting PC. */
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}
1996
/* Exception vector base address (virtual). */
unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
/* Per-ExcCode exception handlers (indexed by Cause.ExcCode, 0..31). */
unsigned long exception_handlers[32];
/* Vectored interrupt handlers (up to 64 EIC vectors). */
unsigned long vi_handlers[64];

/* Keep the exception vector region out of the general memory pool. */
void reserve_exception_space(phys_addr_t addr, unsigned long size)
{
	memblock_reserve(addr, size);
}
2006
/*
 * Install @addr as the handler for exception code @n and return the
 * previous handler.  For EXCCODE_INT (n == 0) on cores with a divec,
 * also patch a jump to the handler at the dedicated interrupt vector
 * (ebase + 0x200) using uasm.
 */
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;	/* $k0 scratch register */
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			/* Handler within the same 256MB segment: direct j. */
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			/* Otherwise load the address into $k0 and jr. */
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
2045
/*
 * Default handler for vectored interrupts with no registered handler:
 * this should never fire, so dump state and panic.
 */
static void do_default_vi(void)
{
	struct pt_regs *regs = get_irq_regs();

	show_regs(regs);
	panic("Caught unexpected vectored interrupt.");
}
2051
/*
 * Install @addr as the handler for vectored interrupt @n using shadow
 * register set @srs, patching the per-vector trampoline at
 * ebase + 0x200 + n*VECTORSPACING.  Returns the previous handler.
 * A NULL @addr installs do_default_vi.  The byte-level patching of the
 * lui/ori immediates and the hand-encoded j/jal32 below are exact —
 * do not reorder or reformat.
 */
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		/* EIC mode: the board binds the vector to the SRS. */
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only present if we have more than 1 set. */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default
		 * handler that does normal register saving and standard
		 * interrupt exit.
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		/* +2: immediate halfword lives after the opcode halfword. */
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * This handler does not fit in the allotted
			 * vector space.
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		/* Patch the handler address into the lui/ori pair. */
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if
		 * required (eg using a shadow set).
		 */
		u32 insn;

		h = (u16 *)b;

#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;	/* nop delay slot */
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
2148
/* Install a vectored interrupt handler using shadow register set 0. */
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
2153
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ, or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ, or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2174
/* "noulri" command line option: disable the RDHWR UserLocal register. */
static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
2185
2186
/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_KERNEL_CUMASK;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
	back_to_back_c0_hazard();
}
2208
/* Value programmed into the HWREna register (RDHWR access control). */
unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	/* R2/R6 cores expose CPUNum/SYNCI step/CC/CCRes to userland. */
	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	/* UserLocal (TLS pointer), unless disabled with "noulri". */
	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}
2229
/*
 * Program the exception vector base (EBase) and vectored interrupt
 * configuration for this CPU.
 */
static void configure_exception_vector(void)
{
	if (cpu_has_mips_r2_r6) {
		/* EBase is only writable while Status.BEV is set. */
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
	}
	if (cpu_has_veic || cpu_has_vint) {
		/* Vector spacing field (IntCtl.VS). */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			/* IV bit is per-VPE; quiesce the other TCs first. */
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}
2258
/*
 * Per-CPU trap setup, run on every CPU as it comes up.
 * @is_boot_cpu: true for the boot CPU (which has already run
 * cpu_cache_init() from trap_init()).
 */
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2
	 * only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter
	 *    interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel
	 *    interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		/*
		 * NOTE(review): the shift is set from the legacy *PERFCNT*
		 * constant here — looks odd but may be intentional; confirm
		 * against the legacy IRQ constants before changing.
		 */
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (cpu_has_mmid)
		cpu_data[cpu].asid_cache = 0;
	else if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	/* Run in lazy-TLB mode on init_mm until a real mm is scheduled. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
2306
2307
/* Install uncached CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	/* microMIPS handler symbols carry the ISA bit; strip it for copy. */
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}
2317
static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
2336
/* "rdhwr_noopt": force the unoptimized RDHWR emulation handler. */
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
2345
/*
 * Boot-time trap initialization: allocate/choose the exception vector
 * area, set up per-CPU trap state for the boot CPU, and install every
 * exception handler.  Ordering here matters (e.g. per_cpu_trap_init()
 * must run before handlers are patched into the vector area).
 */
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i, vec_size;
	phys_addr_t ebase_pa;

	check_wait();

	if (!cpu_has_mips_r2_r6) {
		/* Pre-R2: vectors live at the fixed legacy base. */
		ebase = CAC_BASE;
		vec_size = 0x400;
	} else {
		if (cpu_has_veic || cpu_has_vint)
			vec_size = 0x200 + VECTORSPACING*64;
		else
			vec_size = PAGE_SIZE;

		ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
		if (!ebase_pa)
			panic("%s: Failed to allocate %lu bytes align=0x%x\n",
			      __func__, vec_size, 1 << fls(vec_size));

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a small number of systems where the EBase
		 * register's WG bit is unimplemented.  (EVA systems
		 * always use phys_to_virt.)
		 */
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
		else
			ebase = (unsigned long)phys_to_virt(ebase_pa);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);
	memblock_set_bottom_up(false);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but does
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON64)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	if (cpu_has_ftlbparex)
		set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_gsexcex)
		set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + vec_size);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* CU2 default call */
}
2528
2529static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2530 void *v)
2531{
2532 switch (cmd) {
2533 case CPU_PM_ENTER_FAILED:
2534 case CPU_PM_EXIT:
2535 configure_status();
2536 configure_hwrena();
2537 configure_exception_vector();
2538
2539
2540 TLBMISS_HANDLER_RESTORE();
2541
2542 break;
2543 }
2544
2545 return NOTIFY_OK;
2546}
2547
static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

/* Register the CPU PM notifier at arch initcall time. */
static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);
2557