1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/bitops.h>
16#include <linux/bug.h>
17#include <linux/compiler.h>
18#include <linux/context_tracking.h>
19#include <linux/cpu_pm.h>
20#include <linux/kexec.h>
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <linux/module.h>
24#include <linux/mm.h>
25#include <linux/sched.h>
26#include <linux/smp.h>
27#include <linux/spinlock.h>
28#include <linux/kallsyms.h>
29#include <linux/bootmem.h>
30#include <linux/interrupt.h>
31#include <linux/ptrace.h>
32#include <linux/kgdb.h>
33#include <linux/kdebug.h>
34#include <linux/kprobes.h>
35#include <linux/notifier.h>
36#include <linux/kdb.h>
37#include <linux/irq.h>
38#include <linux/perf_event.h>
39
40#include <asm/bootinfo.h>
41#include <asm/branch.h>
42#include <asm/break.h>
43#include <asm/cop2.h>
44#include <asm/cpu.h>
45#include <asm/cpu-type.h>
46#include <asm/dsp.h>
47#include <asm/fpu.h>
48#include <asm/fpu_emulator.h>
49#include <asm/idle.h>
50#include <asm/mips-r2-to-r6-emul.h>
51#include <asm/mipsregs.h>
52#include <asm/mipsmtregs.h>
53#include <asm/module.h>
54#include <asm/msa.h>
55#include <asm/pgtable.h>
56#include <asm/ptrace.h>
57#include <asm/sections.h>
58#include <asm/tlbdebug.h>
59#include <asm/traps.h>
60#include <asm/uaccess.h>
61#include <asm/watch.h>
62#include <asm/mmu_context.h>
63#include <asm/types.h>
64#include <asm/stacktrace.h>
65#include <asm/uasm.h>
66
/*
 * First-level exception entry points implemented in assembly elsewhere in
 * arch/mips (genex.S, tlbex handlers, scall*.S); referenced here so they
 * can be installed into the exception vectors during trap_init().
 */
extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

/*
 * Board/platform override hooks: a platform's setup code may install its
 * own handlers for bus errors, NMI, EJTAG debug, EIC interrupt binding,
 * ebase relocation and cache errors.  NULL means "use the default".
 */
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void(*board_cache_error_setup)(void);
104
/*
 * Print a raw (non-unwound) backtrace: scan the stack upwards from @reg29
 * and print every word that looks like a kernel text address.  Used when
 * the proper unwinder cannot be trusted (see show_backtrace()).
 */
static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);	/* word-align */
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		/* __get_user() lets us survive a bad/unmapped stack page */
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}
126
#ifdef CONFIG_KALLSYMS
/* Non-zero forces the raw stack scan instead of the frame unwinder. */
int raw_show_trace;
/* Kernel command-line flag "raw_show_trace" sets the above. */
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
136
/*
 * Print a backtrace for @task from the register snapshot @regs.
 * Falls back to the raw stack scan when raw_show_trace was requested on
 * the command line or when the PC is not kernel text (unwind hopeless).
 */
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];	/* stack pointer */
	unsigned long ra = regs->regs[31];	/* return address */
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		/* unwind_stack() advances sp/ra and returns the caller PC,
		 * or 0 when the top frame is reached */
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	printk("\n");
}
157
158
159
160
161
/*
 * Hex-dump the stack starting at regs->regs[29] — up to the next page
 * boundary and at most 40 words — then print a backtrace.
 * The caller must have done set_fs(KERNEL_DS) when dumping a kernel
 * stack, since the words are fetched with __get_user().
 */
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits/word */
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	/* stop at the page boundary: the next page may not be mapped */
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0))
			printk("\n ");
		if (i > 39) {
			printk(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			printk(" (Bad stack address)");
			break;
		}

		printk(" %0*lx", field, stackdata);
		i++;
	}
	printk("\n");
	show_backtrace(task, regs);
}
191
192void show_stack(struct task_struct *task, unsigned long *sp)
193{
194 struct pt_regs regs;
195 mm_segment_t old_fs = get_fs();
196 if (sp) {
197 regs.regs[29] = (unsigned long)sp;
198 regs.regs[31] = 0;
199 regs.cp0_epc = 0;
200 } else {
201 if (task && task != current) {
202 regs.regs[29] = task->thread.reg29;
203 regs.regs[31] = 0;
204 regs.cp0_epc = task->thread.reg31;
205#ifdef CONFIG_KGDB_KDB
206 } else if (atomic_read(&kgdb_active) != -1 &&
207 kdb_current_regs) {
208 memcpy(®s, kdb_current_regs, sizeof(regs));
209#endif
210 } else {
211 prepare_frametrace(®s);
212 }
213 }
214
215
216
217
218 set_fs(KERNEL_DS);
219 show_stacktrace(task, ®s);
220 set_fs(old_fs);
221}
222
/*
 * Disassembly-free code dump: print the instruction words around @pc,
 * from pc-3 to pc+5, with the faulting word bracketed by '<' '>'.
 * Bit 0 of @pc set means microMIPS/MIPS16 mode: fetch 16-bit halfwords
 * from the address with the ISA bit cleared.
 */
static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("\nCode:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			printk(" (Bad address in epc)\n");
			break;
		}
		/* 4 hex digits for 16-bit, 8 for 32-bit encodings */
		printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
}
241
/*
 * Print the full register state in @regs: GPRs, hi/lo, EPC/RA with
 * symbol names, decoded Status bits, Cause/ExcCode, BadVAddr (when
 * meaningful) and the processor ID.
 */
static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);	/* hex digits/word */
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * GPRs, four per row.  $0 is hardwired to zero; k0/k1 ($26/$27)
	 * are clobbered by exception handlers, so their saved values are
	 * meaningless and printed blank.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d :", i);
		if (i == 0)
			printk(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			printk(" %*s", field, "");
		else
			printk(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			printk("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx : %0*lx\n", field, regs->acx);
#endif
	printk("Hi : %0*lx\n", field, regs->hi);
	printk("Lo : %0*lx\n", field, regs->lo);

	/* %pS resolves the addresses to symbol+offset */
	printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		/* R3000-style status: old/previous/current KU and IE bits */
		if (regs->cp0_status & ST0_KUO)
			printk("KUo ");
		if (regs->cp0_status & ST0_IEO)
			printk("IEo ");
		if (regs->cp0_status & ST0_KUP)
			printk("KUp ");
		if (regs->cp0_status & ST0_IEP)
			printk("IEp ");
		if (regs->cp0_status & ST0_KUC)
			printk("KUc ");
		if (regs->cp0_status & ST0_IEC)
			printk("IEc ");
	} else if (cpu_has_4kex) {
		/* R4000-style status: address-space enables and KSU mode */
		if (regs->cp0_status & ST0_KX)
			printk("KX ");
		if (regs->cp0_status & ST0_SX)
			printk("SX ");
		if (regs->cp0_status & ST0_UX)
			printk("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			printk("USER ");
			break;
		case KSU_SUPERVISOR:
			printk("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			printk("KERNEL ");
			break;
		default:
			printk("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			printk("ERL ");
		if (regs->cp0_status & ST0_EXL)
			printk("EXL ");
		if (regs->cp0_status & ST0_IE)
			printk("IE ");
	}
	printk("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	/* ExcCode 1..5 are TLB/address errors: BadVAddr is valid */
	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}
337
338
339
340
/*
 * Public register-dump entry point; defers to the internal formatter.
 * (__show_regs() takes a const pointer, so no cast is needed.)
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
}
345
/*
 * Full oops-style dump: registers, loaded modules, process identity and
 * TLS, stack trace and the code around EPC.
 */
void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		/* flag a mismatch between the saved and hardware TLS */
		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	/* only the set_fs() is conditional: kernel-mode faults need the
	 * address limit lifted so the stack/code dumps can read kernel
	 * addresses via __get_user() */
	if (!user_mode(regs))

		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}
372
373static int regs_to_trapnr(struct pt_regs *regs)
374{
375 return (regs->cp0_cause >> 2) & 0x1f;
376}
377
/* Serialises concurrent die() calls so oops output is not interleaved. */
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * Terminal error path: print an oops for @regs and kill the current
 * task (or panic when the failure is unrecoverable).  Never returns.
 */
void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	/* a debugger on the die chain may claim the event; then don't
	 * deliver a signal, just exit */
	if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs),
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	/* cannot kill a task from interrupt context — panic instead */
	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops) {
		printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
		ssleep(5);
		panic("Fatal exception");
	}

	/* hand over to a crash kernel when one is loaded */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}
416
/* Bounds of the built-in data-bus-error fixup table (linker-provided). */
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

/* Ensure the __dbe_table section exists even when empty. */
__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");

/*
 * Find a bus-error fixup entry for @addr, first in the kernel's own
 * table, then in any module's table.  Returns NULL when none matches.
 */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}
434
/*
 * Bus error exception handler.  Kernel-mode data bus errors may be
 * recovered through the __dbe_table fixups; a board handler can also
 * override the outcome.  Anything unrecovered is fatal in kernel mode
 * and raises SIGBUS in user mode.
 */
asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;		/* set: data access, clear: ifetch */
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();

	/* only kernel-mode data accesses can have a table fixup */
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	/* the board handler, when present, has the final say */
	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			/* resume at the fixup continuation */
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/* unrecoverable: report and die/signal */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs),
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}
483
484
485
486
487
/*
 * MIPS32/64 instruction field masks and encodings used by the ll/sc,
 * rdhwr and sync emulation below.
 */
#define OPCODE 0xfc000000	/* major opcode field (bits 31:26) */
#define BASE 0x03e00000		/* base register field (bits 25:21) */
#define RT 0x001f0000		/* rt register field (bits 20:16) */
#define OFFSET 0x0000ffff	/* 16-bit signed immediate */
#define LL 0xc0000000		/* major opcode of LL */
#define SC 0xe0000000		/* major opcode of SC */
#define SPEC0 0x00000000	/* SPECIAL major opcode */
#define SPEC3 0x7c000000	/* SPECIAL3 major opcode */
#define RD 0x0000f800		/* rd register field (bits 15:11) */
#define FUNC 0x0000003f		/* function field (bits 5:0) */
#define SYNC 0x0000000f		/* SPECIAL function code of SYNC */
#define RDHWR 0x0000003b	/* SPECIAL3 function code of RDHWR */

/* microMIPS POOL32A RDHWR encoding and its register fields. */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR 0x00006b3c
#define MM_RS 0x001f0000
#define MM_RT 0x03e00000
506
507
508
509
510
/*
 * Software LL/SC monitor state for CPUs without native ll/sc: ll_bit is
 * the "link" flag and ll_task records which task holds it.  A competing
 * LL from another task breaks the link (see simulate_sc()).
 */
unsigned int ll_bit;
struct task_struct *ll_task;

/*
 * Emulate an LL instruction: load the word and establish the link.
 * Returns 0 on success or the signal to raise (SIGBUS on misalignment,
 * SIGSEGV on an unreadable address).
 */
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/* sign-extend the 16-bit immediate */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	/* link holds only if nobody else LL'd since our last LL/SC */
	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}
552
/*
 * Emulate an SC instruction: store only if the link established by the
 * matching LL is still intact, and write the success flag (1/0) back to
 * the rt register.  Returns 0 on success or the signal to raise.
 */
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/* sign-extend the 16-bit immediate */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	/* link broken: SC fails, rt <- 0, no store performed */
	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;	/* SC succeeded */

	return 0;
}
593
594
595
596
597
598
599
600
601static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
602{
603 if ((opcode & OPCODE) == LL) {
604 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
605 1, regs, 0);
606 return simulate_ll(regs, opcode);
607 }
608 if ((opcode & OPCODE) == SC) {
609 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
610 1, regs, 0);
611 return simulate_sc(regs, opcode);
612 }
613
614 return -1;
615}
616
617
618
619
620
/*
 * Emulate RDHWR $rt, $rd for the hardware registers user space may read:
 * 0 = CPU number, 1 = SYNCI cache-line step, 2 = CP0 Count, 3 = Count
 * resolution, 29 = user TLS pointer.  Returns 0 if emulated, -1 if @rd
 * is not one of those.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case 0:		/* CPU number */
		regs->regs[rt] = smp_processor_id();
		return 0;
	case 1:		/* SYNCI step: smallest cache line size */
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case 2:		/* CP0 Count */
		regs->regs[rt] = read_c0_count();
		return 0;
	case 3:		/* Count resolution */
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case 29:	/* user-local (TLS) register */
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}
655
656static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
657{
658 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
659 int rd = (opcode & RD) >> 11;
660 int rt = (opcode & RT) >> 16;
661
662 simulate_rdhwr(regs, rd, rt);
663 return 0;
664 }
665
666
667 return -1;
668}
669
670static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
671{
672 if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
673 int rd = (opcode & MM_RS) >> 16;
674 int rt = (opcode & MM_RT) >> 21;
675 simulate_rdhwr(regs, rd, rt);
676 return 0;
677 }
678
679
680 return -1;
681}
682
683static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
684{
685 if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
686 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
687 1, regs, 0);
688 return 0;
689 }
690
691 return -1;
692}
693
694asmlinkage void do_ov(struct pt_regs *regs)
695{
696 enum ctx_state prev_state;
697 siginfo_t info;
698
699 prev_state = exception_enter();
700 die_if_kernel("Integer overflow", regs);
701
702 info.si_code = FPE_INTOVF;
703 info.si_signo = SIGFPE;
704 info.si_errno = 0;
705 info.si_addr = (void __user *) regs->cp0_epc;
706 force_sig_info(SIGFPE, &info, current);
707 exception_exit(prev_state);
708}
709
710int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
711{
712 struct siginfo si = { 0 };
713
714 switch (sig) {
715 case 0:
716 return 0;
717
718 case SIGFPE:
719 si.si_addr = fault_addr;
720 si.si_signo = sig;
721
722
723
724
725 fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
726 (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
727 if (fcr31 & FPU_CSR_INV_X)
728 si.si_code = FPE_FLTINV;
729 else if (fcr31 & FPU_CSR_DIV_X)
730 si.si_code = FPE_FLTDIV;
731 else if (fcr31 & FPU_CSR_OVF_X)
732 si.si_code = FPE_FLTOVF;
733 else if (fcr31 & FPU_CSR_UDF_X)
734 si.si_code = FPE_FLTUND;
735 else if (fcr31 & FPU_CSR_INE_X)
736 si.si_code = FPE_FLTRES;
737 else
738 si.si_code = __SI_FAULT;
739 force_sig_info(sig, &si, current);
740 return 1;
741
742 case SIGBUS:
743 si.si_addr = fault_addr;
744 si.si_signo = sig;
745 si.si_code = BUS_ADRERR;
746 force_sig_info(sig, &si, current);
747 return 1;
748
749 case SIGSEGV:
750 si.si_addr = fault_addr;
751 si.si_signo = sig;
752 down_read(¤t->mm->mmap_sem);
753 if (find_vma(current->mm, (unsigned long)fault_addr))
754 si.si_code = SEGV_ACCERR;
755 else
756 si.si_code = SEGV_MAPERR;
757 up_read(¤t->mm->mmap_sem);
758 force_sig_info(sig, &si, current);
759 return 1;
760
761 default:
762 force_sig(sig, current);
763 return 1;
764 }
765}
766
767static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
768 unsigned long old_epc, unsigned long old_ra)
769{
770 union mips_instruction inst = { .word = opcode };
771 void __user *fault_addr;
772 unsigned long fcr31;
773 int sig;
774
775
776 switch (inst.i_format.opcode) {
777 case cop1_op:
778 case cop1x_op:
779 case lwc1_op:
780 case ldc1_op:
781 case swc1_op:
782 case sdc1_op:
783 break;
784
785 default:
786 return -1;
787 }
788
789
790
791
792
793 regs->cp0_epc = old_epc;
794 regs->regs[31] = old_ra;
795
796
797 lose_fpu(1);
798
799
800 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
801 &fault_addr);
802 fcr31 = current->thread.fpu.fcr31;
803
804
805
806
807
808 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
809
810
811 own_fpu(1);
812
813
814 process_fpemu_return(sig, fault_addr, fcr31);
815
816 return 0;
817}
818
819
820
821
822asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
823{
824 enum ctx_state prev_state;
825 void __user *fault_addr;
826 int sig;
827
828 prev_state = exception_enter();
829 if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
830 SIGFPE) == NOTIFY_STOP)
831 goto out;
832
833
834 write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
835 local_irq_enable();
836
837 die_if_kernel("FP exception in kernel code", regs);
838
839 if (fcr31 & FPU_CSR_UNI_X) {
840
841
842
843
844
845
846
847
848
849
850
851 lose_fpu(1);
852
853
854 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
855 &fault_addr);
856 fcr31 = current->thread.fpu.fcr31;
857
858
859
860
861
862 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
863
864
865 own_fpu(1);
866 } else {
867 sig = SIGFPE;
868 fault_addr = (void __user *) regs->cp0_epc;
869 }
870
871
872 process_fpemu_return(sig, fault_addr, fcr31);
873
874out:
875 exception_exit(prev_state);
876}
877
/*
 * Common back end for break and trap exceptions.  @code is the
 * break/trap code extracted by the caller; well-known codes get a
 * specific signal/si_code, everything else is SIGTRAP.
 */
void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
	const char *str)
{
	siginfo_t info;
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	/* give a low-level attached debugger first claim on the trap */
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
		return;
#endif

	if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs),
		       SIGTRAP) == NOTIFY_STOP)
		return;

	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		/* gcc-inserted overflow / divide-by-zero checks */
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (code == BRK_DIVZERO)
			info.si_code = FPE_INTDIV;
		else
			info.si_code = FPE_INTOVF;
		info.si_signo = SIGFPE;
		info.si_errno = 0;
		info.si_addr = (void __user *) regs->cp0_epc;
		force_sig_info(SIGFPE, &info, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
		/*
		 * Math-emulator delay-slot return marker: when the
		 * dsemul frame handles it, resume silently.
		 */
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig(SIGTRAP, current);
	}
}
938
/*
 * Breakpoint exception handler.  Extracts the break code from the
 * faulting instruction (classic, MIPS16e or microMIPS encoding), lets
 * kprobes claim its private codes, and otherwise defers to
 * do_trap_or_bp().
 */
asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	/* kernel-mode faults need the address limit lifted to fetch
	 * the instruction with __get_user() */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e BREAK: code in bits 10:5 */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK16: 4-bit code */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK: fetch second halfword */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

	/*
	 * Assemblers may place the break code in either field of the
	 * 20-bit code region; normalise codes >= 1024 by swapping the
	 * two 10-bit halves.
	 */
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/* kprobes' private break codes go to the die chain first */
	switch (bcode) {
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
1017
/*
 * Trap-instruction exception handler.  Extracts the trap code from the
 * classic or microMIPS encoding and defers to do_trap_or_bp().
 */
asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	/* lift the address limit so the kernel-mode insn fetch works */
	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];

		/* immediate-form traps (major opcode 0) carry a code */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;

		/* register-form traps (SPECIAL) carry a 10-bit code */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}
1058
1059asmlinkage void do_ri(struct pt_regs *regs)
1060{
1061 unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1062 unsigned long old_epc = regs->cp0_epc;
1063 unsigned long old31 = regs->regs[31];
1064 enum ctx_state prev_state;
1065 unsigned int opcode = 0;
1066 int status = -1;
1067
1068
1069
1070
1071
1072 if (mipsr2_emulation && cpu_has_mips_r6 &&
1073 likely(user_mode(regs)) &&
1074 likely(get_user(opcode, epc) >= 0)) {
1075 unsigned long fcr31 = 0;
1076
1077 status = mipsr2_decoder(regs, opcode, &fcr31);
1078 switch (status) {
1079 case 0:
1080 case SIGEMT:
1081 task_thread_info(current)->r2_emul_return = 1;
1082 return;
1083 case SIGILL:
1084 goto no_r2_instr;
1085 default:
1086 process_fpemu_return(status,
1087 ¤t->thread.cp0_baduaddr,
1088 fcr31);
1089 task_thread_info(current)->r2_emul_return = 1;
1090 return;
1091 }
1092 }
1093
1094no_r2_instr:
1095
1096 prev_state = exception_enter();
1097
1098 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
1099 SIGILL) == NOTIFY_STOP)
1100 goto out;
1101
1102 die_if_kernel("Reserved instruction in kernel code", regs);
1103
1104 if (unlikely(compute_return_epc(regs) < 0))
1105 goto out;
1106
1107 if (get_isa16_mode(regs->cp0_epc)) {
1108 unsigned short mmop[2] = { 0 };
1109
1110 if (unlikely(get_user(mmop[0], epc) < 0))
1111 status = SIGSEGV;
1112 if (unlikely(get_user(mmop[1], epc) < 0))
1113 status = SIGSEGV;
1114 opcode = (mmop[0] << 16) | mmop[1];
1115
1116 if (status < 0)
1117 status = simulate_rdhwr_mm(regs, opcode);
1118 } else {
1119 if (unlikely(get_user(opcode, epc) < 0))
1120 status = SIGSEGV;
1121
1122 if (!cpu_has_llsc && status < 0)
1123 status = simulate_llsc(regs, opcode);
1124
1125 if (status < 0)
1126 status = simulate_rdhwr_normal(regs, opcode);
1127
1128 if (status < 0)
1129 status = simulate_sync(regs, opcode);
1130
1131 if (status < 0)
1132 status = simulate_fp(regs, opcode, old_epc, old31);
1133 }
1134
1135 if (status < 0)
1136 status = SIGILL;
1137
1138 if (unlikely(status > 0)) {
1139 regs->cp0_epc = old_epc;
1140 regs->regs[31] = old31;
1141 force_sig(status, current);
1142 }
1143
1144out:
1145 exception_exit(prev_state);
1146}
1147
1148
1149
1150
1151
1152
/*
 * MT FPAFF policy: once a task has taken more than mt_fpemul_threshold
 * emulated FP operations, bind it to the CPUs that have real FPU
 * hardware (its original affinity is saved for later restoration).
 *
 * Fix: '&current' had been mangled into the '¤t' mojibake — restored
 * both occurrences.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * Only restrict affinity when the task's allowed set
		 * actually overlaps the FPU-equipped CPUs.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			/* remember the original mask for TIF_FPUBOUND undo */
			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif
}
1176
1177
1178
1179
/* Notifier chain invoked on coprocessor-2 (CU2) exceptions. */
static RAW_NOTIFIER_HEAD(cu2_chain);

/* Register a handler for CU2 exceptions (e.g. an Octeon CvmSeg driver). */
int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

/* Run the CU2 chain; @val is a CU2_* event, @v the pt_regs. */
int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}
1191
/*
 * Last-resort CU2 notifier: no platform handler claimed the exception,
 * so it is fatal in kernel mode and SIGILL in user mode.
 */
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}
1203
/*
 * wait_on_atomic_t() action used while another thread is switching the
 * mm's FP mode: just reschedule until the switch completes.
 */
static int wait_on_fp_mode_switch(atomic_t *p)
{
	schedule();
	return 0;
}
1215
1216static int enable_restore_fp_context(int msa)
1217{
1218 int err, was_fpu_owner, prior_msa;
1219
1220
1221
1222
1223
1224 wait_on_atomic_t(¤t->mm->context.fp_mode_switching,
1225 wait_on_fp_mode_switch, TASK_KILLABLE);
1226
1227 if (!used_math()) {
1228
1229 preempt_disable();
1230 err = init_fpu();
1231 if (msa && !err) {
1232 enable_msa();
1233 _init_msa_upper();
1234 set_thread_flag(TIF_USEDMSA);
1235 set_thread_flag(TIF_MSA_CTX_LIVE);
1236 }
1237 preempt_enable();
1238 if (!err)
1239 set_used_math();
1240 return err;
1241 }
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270 if (!msa && !thread_msa_context_live())
1271 return own_fpu(1);
1272
1273
1274
1275
1276
1277 preempt_disable();
1278 was_fpu_owner = is_fpu_owner();
1279 err = own_fpu_inatomic(0);
1280 if (err)
1281 goto out;
1282
1283 enable_msa();
1284 write_msa_csr(current->thread.fpu.msacsr);
1285 set_thread_flag(TIF_USEDMSA);
1286
1287
1288
1289
1290
1291
1292
1293
1294 prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1295 if (!prior_msa && was_fpu_owner) {
1296 _init_msa_upper();
1297
1298 goto out;
1299 }
1300
1301 if (!prior_msa) {
1302
1303
1304
1305
1306 _restore_fp(current);
1307
1308
1309
1310
1311
1312
1313 _init_msa_upper();
1314 } else {
1315
1316 restore_msa(current);
1317
1318
1319 if (!was_fpu_owner)
1320 write_32bit_cp1_register(CP1_STATUS,
1321 current->thread.fpu.fcr31);
1322 }
1323
1324out:
1325 preempt_enable();
1326
1327 return 0;
1328}
1329
1330asmlinkage void do_cpu(struct pt_regs *regs)
1331{
1332 enum ctx_state prev_state;
1333 unsigned int __user *epc;
1334 unsigned long old_epc, old31;
1335 void __user *fault_addr;
1336 unsigned int opcode;
1337 unsigned long fcr31;
1338 unsigned int cpid;
1339 int status, err;
1340 unsigned long __maybe_unused flags;
1341 int sig;
1342
1343 prev_state = exception_enter();
1344 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1345
1346 if (cpid != 2)
1347 die_if_kernel("do_cpu invoked from kernel context!", regs);
1348
1349 switch (cpid) {
1350 case 0:
1351 epc = (unsigned int __user *)exception_epc(regs);
1352 old_epc = regs->cp0_epc;
1353 old31 = regs->regs[31];
1354 opcode = 0;
1355 status = -1;
1356
1357 if (unlikely(compute_return_epc(regs) < 0))
1358 break;
1359
1360 if (get_isa16_mode(regs->cp0_epc)) {
1361 unsigned short mmop[2] = { 0 };
1362
1363 if (unlikely(get_user(mmop[0], epc) < 0))
1364 status = SIGSEGV;
1365 if (unlikely(get_user(mmop[1], epc) < 0))
1366 status = SIGSEGV;
1367 opcode = (mmop[0] << 16) | mmop[1];
1368
1369 if (status < 0)
1370 status = simulate_rdhwr_mm(regs, opcode);
1371 } else {
1372 if (unlikely(get_user(opcode, epc) < 0))
1373 status = SIGSEGV;
1374
1375 if (!cpu_has_llsc && status < 0)
1376 status = simulate_llsc(regs, opcode);
1377
1378 if (status < 0)
1379 status = simulate_rdhwr_normal(regs, opcode);
1380 }
1381
1382 if (status < 0)
1383 status = SIGILL;
1384
1385 if (unlikely(status > 0)) {
1386 regs->cp0_epc = old_epc;
1387 regs->regs[31] = old31;
1388 force_sig(status, current);
1389 }
1390
1391 break;
1392
1393 case 3:
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406 if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1407 force_sig(SIGILL, current);
1408 break;
1409 }
1410
1411
1412 case 1:
1413 err = enable_restore_fp_context(0);
1414
1415 if (raw_cpu_has_fpu && !err)
1416 break;
1417
1418 sig = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0,
1419 &fault_addr);
1420 fcr31 = current->thread.fpu.fcr31;
1421
1422
1423
1424
1425
1426 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
1427
1428
1429 if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1430 mt_ase_fp_affinity();
1431
1432 break;
1433
1434 case 2:
1435 raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1436 break;
1437 }
1438
1439 exception_exit(prev_state);
1440}
1441
/*
 * MSA floating-point exception handler: clear the latched MSACSR cause
 * bits and deliver SIGFPE.  @msacsr is the value captured at exception
 * entry.
 */
asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
		goto out;

	/* clear the cause bits so the exception does not refire */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}
1460
/*
 * MSA-unusable exception handler: enable MSA and restore the task's
 * vector context.  SIGILL when the CPU has no MSA or the task runs with
 * 32-bit FP registers (incompatible with MSA's 64-bit scalar halves).
 */
asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}
1481
1482asmlinkage void do_mdmx(struct pt_regs *regs)
1483{
1484 enum ctx_state prev_state;
1485
1486 prev_state = exception_enter();
1487 force_sig(SIGILL, current);
1488 exception_exit(prev_state);
1489}
1490
1491
1492
1493
1494asmlinkage void do_watch(struct pt_regs *regs)
1495{
1496 enum ctx_state prev_state;
1497 u32 cause;
1498
1499 prev_state = exception_enter();
1500
1501
1502
1503
1504 cause = read_c0_cause();
1505 cause &= ~(1 << 22);
1506 write_c0_cause(cause);
1507
1508
1509
1510
1511
1512
1513 if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1514 mips_read_watch_registers();
1515 local_irq_enable();
1516 force_sig(SIGTRAP, current);
1517 } else {
1518 mips_clear_watch_registers();
1519 local_irq_enable();
1520 }
1521 exception_exit(prev_state);
1522}
1523
/*
 * Machine check handler.  ST0_TS set means the check was raised by
 * multiple matching TLB entries; dump the relevant CP0 state and panic.
 */
asmlinkage void do_mcheck(struct pt_regs *regs)
{
	/* printf field width for a full-register hex dump */
	const int field = 2 * sizeof(unsigned long);
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		pr_err("Index : %0x\n", read_c0_index());
		pr_err("Pagemask: %0x\n", read_c0_pagemask());
		pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
		pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
		pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
		pr_err("Wired : %0x\n", read_c0_wired());
		pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
		if (cpu_has_htw) {
			/* Hardware page table walker state */
			pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
			pr_err("PWSize : %0*lx\n", field, read_c0_pwsize());
			pr_err("PWCtl : %0x\n", read_c0_pwctl());
		}
		pr_err("\n");
		dump_tlb_all();
	}

	/* Temporarily widen the address limit so show_code() can read
	 * kernel text when the fault came from kernel mode. */
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g.
	 * watchpoint exceptions)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}
1566
1567asmlinkage void do_mt(struct pt_regs *regs)
1568{
1569 int subcode;
1570
1571 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1572 >> VPECONTROL_EXCPT_SHIFT;
1573 switch (subcode) {
1574 case 0:
1575 printk(KERN_DEBUG "Thread Underflow\n");
1576 break;
1577 case 1:
1578 printk(KERN_DEBUG "Thread Overflow\n");
1579 break;
1580 case 2:
1581 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1582 break;
1583 case 3:
1584 printk(KERN_DEBUG "Gating Storage Exception\n");
1585 break;
1586 case 4:
1587 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1588 break;
1589 case 5:
1590 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1591 break;
1592 default:
1593 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1594 subcode);
1595 break;
1596 }
1597 die_if_kernel("MIPS MT Thread exception in kernel", regs);
1598
1599 force_sig(SIGILL, current);
1600}
1601
1602
1603asmlinkage void do_dsp(struct pt_regs *regs)
1604{
1605 if (cpu_has_dsp)
1606 panic("Unexpected DSP exception");
1607
1608 force_sig(SIGILL, current);
1609}
1610
1611asmlinkage void do_reserved(struct pt_regs *regs)
1612{
1613
1614
1615
1616
1617
1618 show_regs(regs);
1619 panic("Caught reserved exception %ld - should not happen.",
1620 (regs->cp0_cause & 0x7f) >> 2);
1621}
1622
/* "nol1par" command-line option: disable L1 cache parity protection. */
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
/* "nol2par" command-line option: disable L2 cache parity protection. */
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);
1637
1638
1639
1640
1641
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
		{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000
		unsigned long errctl;
		unsigned int l1parity_present, l2parity_present;

		errctl = read_c0_ecc();
		errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

		/* probe L1 parity support */
		write_c0_ecc(errctl | ERRCTL_PE);
		back_to_back_c0_hazard();
		l1parity_present = (read_c0_ecc() & ERRCTL_PE);

		/* probe L2 parity support */
		write_c0_ecc(errctl|ERRCTL_L2P);
		back_to_back_c0_hazard();
		l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

		if (l1parity_present && l2parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
			if (l1parity ^ l2parity)
				errctl |= ERRCTL_L2P;
		} else if (l1parity_present) {
			if (l1parity)
				errctl |= ERRCTL_PE;
		} else if (l2parity_present) {
			if (l2parity)
				errctl |= ERRCTL_L2P;
		} else {
			/* No parity available */
		}

		printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

		write_c0_ecc(errctl);
		back_to_back_c0_hazard();
		errctl = read_c0_ecc();
		printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

		if (l1parity_present)
			printk(KERN_INFO "Cache parity protection %sabled\n",
			       (errctl & ERRCTL_PE) ? "en" : "dis");

		if (l2parity_present) {
			if (l1parity_present && l1parity)
				errctl ^= ERRCTL_L2P;
			printk(KERN_INFO "L2 cache parity protection %sabled\n",
			       (errctl & ERRCTL_L2P) ? "en" : "dis");
		}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}
1728
/* Report an unrecoverable cache parity error and panic. */
asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		/* MIPS Technologies R2+ cores additionally report ES. */
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}
1776
/* FTLB error exception: decode what we can, then treat it like a cache
 * parity error (which panics). */
asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}
1804
1805
1806
1807
1808
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}
1842
1843
1844
1845
1846
/* NMI notifier chain; called from nmi_exception_handler() below. */
static RAW_NOTIFIER_HEAD(nmi_chain);

/* Register a callback to be invoked when an NMI is taken. */
int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}
1853
/* NMI handler: notify listeners, then die.  Never returns. */
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	/* Format the message before clobbering cp0_epc below. */
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	/* Report the ErrorEPC (where the NMI actually hit) via die(). */
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
}
1865
#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;			/* exception vector base address */
unsigned long exception_handlers[32];	/* one per ExcCode */
unsigned long vi_handlers[64];		/* vectored-interrupt handlers */
1871
/*
 * Install an exception handler for exception code n, returning the
 * previously installed handler.
 */
void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
		/* Patch a jump to the interrupt handler at ebase+0x200. */
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			/* Handler is within j-range: use a direct jump. */
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			/* Otherwise load the address into k0 and jr. */
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}
1910
/* Default handler for vectored interrupts with no registered handler. */
static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}
1916
/*
 * Install a vectored-interrupt handler for vector n using shadow
 * register set srs, patching the vector code in place.  Returns the
 * previously installed handler.
 */
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt
		 * exit.
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		/* Patch the handler address into the lui/ori pair. */
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	}
	else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if
		 * required (eg hi/lo) and return from the exception using
		 * "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}
2013
/* Install a vectored-interrupt handler using shadow register set 0. */
void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}
2018
extern void tlb_init(void);

/*
 * Timer interrupt
 */
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2039
/* "noulri" command-line option: disable the RDHWR UserLocal register. */
static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);
2050
2051
/* configure_status: set up the CP0 Status register for this CPU. */
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}
2072
2073
/* configure_hwrena: enable user-mode RDHWR access where supported. */
static void configure_hwrena(void)
{
	unsigned int hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		/* Low four HWREna bits: the architecturally-defined
		 * RDHWR registers (CPUNum, SYNCI_Step, CC, CCRes). */
		hwrena |= 0x0000000f;

	if (!noulri && cpu_has_userlocal)
		/* Bit 29: UserLocal register (used for TLS). */
		hwrena |= (1 << 29);

	if (hwrena)
		write_c0_hwrena(hwrena);
}
2087
/* configure_exception_vector: program EBase/IntCtl and Cause.IV. */
static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		/* EBase is only writable while Status.BEV is set. */
		unsigned long sr = set_c0_status(ST0_BEV);
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			/* Quiesce other VPEs while touching Cause. */
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}
2106
/* Per-CPU trap setup; called once per CPU during bring-up. */
void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		/* NOTE(review): shift is assigned the PERFCNT constant
		 * here - looks odd but matches historical behavior;
		 * confirm before changing. */
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch(). */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}
2152
2153
/* Install uncached CPU exception handler at ebase + offset. */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	/* Strip the microMIPS ISA bit from the source address. */
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}
2163
static char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}
2182
/* "rdhwr_noopt" command-line option: disable the optimized RDHWR
 * emulation fast path. */
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);
2191
/* One-time boot-CPU trap/exception setup. */
void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		/* Vectored mode needs room for 64 vectors after 0x200. */
		unsigned long size = 0x200 + VECTORSPACING*64;
		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);
	} else {
#ifdef CONFIG_KVM_GUEST
#define KVM_GUEST_KSEG0	0x40000000
		ebase = KVM_GUEST_KSEG0;
#else
		ebase = CKSEG0;
#endif
		if (cpu_has_mips_r2_r6)
			ebase += (read_c0_ebase() & 0x3ffff000);
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overriden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to it's final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(23, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable for cache parity detection, but does
	 * it different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(0, using_rollback_handler() ? rollback_handle_int
							: handle_int);
	set_except_vector(1, handle_tlbm);
	set_except_vector(2, handle_tlbl);
	set_except_vector(3, handle_tlbs);

	set_except_vector(4, handle_adel);
	set_except_vector(5, handle_ades);

	set_except_vector(6, handle_ibe);
	set_except_vector(7, handle_dbe);

	set_except_vector(8, handle_sys);
	set_except_vector(9, handle_bp);
	set_except_vector(10, rdhwr_noopt ? handle_ri :
			  (cpu_has_vtag_icache ?
			   handle_ri_rdhwr_vivt : handle_ri_rdhwr));
	set_except_vector(11, handle_cpu);
	set_except_vector(12, handle_ov);
	set_except_vector(13, handle_tr);
	set_except_vector(14, handle_msa_fpe);

	if (current_cpu_type() == CPU_R6000 ||
	    current_cpu_type() == CPU_R6000A) {
		/*
		 * The R6000 is the only R-series CPU that features a machine
		 * check exception (similar to the R4000 cache error) and
		 * unaligned ldc1/sdc1 exception.  The handlers have not been
		 * written yet.  Well, anyway there is no R6000 machine on the
		 * current list of targets for Linux/MIPS.
		 */
	}

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(15, handle_fpe);

	set_except_vector(16, handle_ftlb);

	if (cpu_has_rixiex) {
		/* XI/RI execute-inhibit faults go to the page fault path. */
		set_except_vector(19, tlb_do_page_fault_0);
		set_except_vector(20, tlb_do_page_fault_0);
	}

	set_except_vector(21, handle_msa);
	set_except_vector(22, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(24, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(25, handle_mt);

	set_except_vector(26, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* 0x80000000 = lowest priority */
}
2358
2359static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2360 void *v)
2361{
2362 switch (cmd) {
2363 case CPU_PM_ENTER_FAILED:
2364 case CPU_PM_EXIT:
2365 configure_status();
2366 configure_hwrena();
2367 configure_exception_vector();
2368
2369
2370 TLBMISS_HANDLER_RESTORE();
2371
2372 break;
2373 }
2374
2375 return NOTIFY_OK;
2376}
2377
static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

/* Register the CPU PM notifier at arch_initcall time. */
static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);
2387