#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

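/*
 * Board/platform hooks.  Platform code may assign these during setup to
 * override or extend the generic exception handling below.
 */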
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

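/*
 * Dump a raw backtrace by scanning the stack for words that look like
 * kernel text addresses.
 */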
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

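/*
 * Produce a symbolic call trace with the stack unwinder; fall back to the
 * raw dump when raw_show_trace is set or the PC is not in kernel text.
 */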
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n       ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure
	 * to access the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

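/*
 * Dump the instructions around the faulting PC.  An odd PC indicates a
 * MIPS16/microMIPS instruction stream, which is dumped in 16-bit units.
 */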
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx   : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

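/*
 * Handle bus error exceptions.  A data bus error in kernel mode may have a
 * fixup entry; otherwise the board handler gets a chance, and anything left
 * over is fatal.
 */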
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;			/* Must be something else ... */
}

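/*
 * For illustration only (not part of the original source): a typical
 * user-space atomic sequence that ends up in the emulation path above on
 * ll/sc-less CPUs looks like
 *
 *	1:	ll	t0, 0(a0)	# load linked, sets the lock flag
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# store conditional, t0 = success
 *		beqz	t0, 1b		# retry if the link was broken
 *
 * simulate_ll()/simulate_sc() reproduce this protocol in software using
 * the ll_bit/ll_task pair above.
 */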

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI length */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* Read count register */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count register resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:	/* UserLocal (TLS) register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;			/* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	siginfo_t info = {
		.si_signo = SIGFPE,
		.si_code = FPE_INTOVF,
		.si_addr = (void __user *)regs->cp0_epc,
	};

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_info(SIGFPE, &info, current);
	exception_exit(prev_state);
}

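/*
 * Deliver the signal chosen by the FPU emulator or FP exception paths to
 * the current task, mapping FCSR cause bits to a meaningful si_code.
 * Returns 1 if a signal was sent, 0 if sig was 0.
 */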
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	struct siginfo si = { 0 };

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		/*
		 * Keep only the raised exceptions that are also enabled,
		 * then report the highest-priority one via si_code.
		 */
		fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
			 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
		if (fcr31 & FPU_CSR_INV_X)
			si.si_code = FPE_FLTINV;
		else if (fcr31 & FPU_CSR_DIV_X)
			si.si_code = FPE_FLTDIV;
		else if (fcr31 & FPU_CSR_OVF_X)
			si.si_code = FPE_FLTOVF;
		else if (fcr31 & FPU_CSR_UDF_X)
			si.si_code = FPE_FLTUND;
		else if (fcr31 & FPU_CSR_INE_X)
			si.si_code = FPE_FLTRES;
		else
			si.si_code = __SI_FAULT;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGBUS:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		si.si_code = BUS_ADRERR;
		force_sig_info(sig, &si, current);
		return 1;

	case SIGSEGV:
		si.si_addr = fault_addr;
		si.si_signo = sig;
		down_read(&current->mm->mmap_sem);
		if (find_vma(current->mm, (unsigned long)fault_addr))
			si.si_code = SEGV_ACCERR;
		else
			si.si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_info(sig, &si, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);
	fcr31 = current->thread.fpu.fcr31;

	/*
	 * We can't allow the emulated instruction to leave any of
	 * the cause bits set in $fcr31.
	 */
	current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		/* Ensure 'resume' not overwrite saved fp context again. */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave any of
		 * the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Restore the hardware register state */
		own_fpu(1);	/* Using the FPU again.  */
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

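/*
 * Common handling for break and trap exceptions: decode the code embedded
 * in the instruction, die for kernel-mode faults, otherwise deliver the
 * matching signal to user space.
 */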
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	siginfo_t info = { 0 };
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
	 * insns, even for trap and break codes that indicate arithmetic
	 * failures.  Weird ...
	 * But should we continue the brokenness???  --macro
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * This breakpoint code is used by the FPU emulator to retake
		 * control of the CPU after executing the instruction in the
		 * delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			info.si_signo = SIGTRAP;
			info.si_code = si_code;
			force_sig_info(SIGTRAP, &info, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction.  This is
	 * for performance instead of correctness.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			task_thread_info(current)->r2_emul_return = 1;
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			task_thread_info(current)->r2_emul_return = 1;
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;	/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold amount of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If the task's allowed CPU mask overlaps the set of CPUs
		 * that actually have an FPU, restrict it to those CPUs and
		 * remember the original mask.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int wait_on_fp_mode_switch(atomic_t *p)
{
	/*
	 * The FP mode for this task is currently being switched. That may
	 * involve modifications to the format of this task's FP context which
	 * make it unsafe to proceed with execution for the moment. Instead,
	 * schedule some other task.
	 */
	schedule();
	return 0;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_on_atomic_t(&current->mm->context.fp_mode_switching,
			 wait_on_fp_mode_switch, TASK_KILLABLE);

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			_init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * then we must restore the vector context, even if we're currently
	 * only executing a scalar FP instruction: restoring it later would
	 * either clobber registers modified in the meantime or lose the
	 * upper bits of the vector registers, neither of which is
	 * acceptable.
	 */
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		_init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		_init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

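/*
 * Coprocessor Unusable exception: dispatch on the coprocessor number from
 * CP0.Cause.CE.  CP0 faults may be emulatable instructions, CP1/CP1X (and
 * erroneously CP3 on some FPU-less cores) go to the FPU or its emulator,
 * and CP2 is handled by a board-specific notifier chain.
 */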
asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	unsigned long __maybe_unused flags;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space and consequently the CP0.Status.CU3
		 * bit and the CP0.Cause.CE=3 encoding have been removed as
		 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
		 * onwards the space has been reused for COP1X instructions,
		 * which are enabled by the CP0.Status.CU1 bit and thus use
		 * the CP0.Cause.CE=1 encoding for Coprocessor 1 Unusable
		 * exceptions.  Some FPU-less processors that implement one
		 * of these ISAs however use this code erroneously for COP1X
		 * instructions.  Therefore we redirect this trap to the FP
		 * emulator too.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);
		fcr31 = current->thread.fpu.fcr31;

		/*
		 * We can't allow the emulated instruction to leave
		 * any of the cause bits set in $fcr31.
		 */
		current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
	siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
	enum ctx_state prev_state;
	u32 cause;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	cause = read_c0_cause();
	cause &= ~(1 << 22);
	write_c0_cause(cause);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_info(SIGTRAP, &info, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. watchdog
	 * timeout).
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.  Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

/*
 * Some MIPS CPUs can enable/disable for cache parity detection, but do
 * it different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_I6400:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}

	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

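/*
 * Exception vector bookkeeping: ebase is the base of the CPU's exception
 * vector area, exception_handlers[] holds one entry per ExcCode and
 * vi_handlers[] one per vectored interrupt, spaced VECTORSPACING apart.
 */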
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. The
	 * corresponding handler must be for MIPS32 mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably already disabled and the printk
			 * will never be seen.
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

/* configure STATUS register */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

/* configure HWRENA register */
static void configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		hwrena |= (1 << 29);

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

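/*
 * trap_init() runs once on the boot CPU: it reserves the exception vector
 * area, installs the generic and per-exception handlers, and invokes the
 * board-specific hooks declared at the top of this file.
 */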
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but do
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);
	set_except_vector(EXCCODE_RI, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 */
		/* set_except_vector(14, handle_mc); */
		/* set_except_vector(15, handle_ndc); */
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);