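/*
 * Exception and trap handling for MIPS: backtraces, register dumps and
 * the C-level handlers wired up to the CPU exception vectors.
 */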
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/cpu_pm.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/extable.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/debug.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mips-cps.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <linux/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_tlbp(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_ftlb(void);
extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern void tlb_do_page_fault_0(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);

static void show_raw_backtrace(unsigned long reg29)
{
	unsigned long *sp = (unsigned long *)(reg29 & ~3);
	unsigned long addr;

	printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif
	while (!kstack_end(sp)) {
		unsigned long __user *p =
			(unsigned long __user *)(unsigned long)sp++;
		if (__get_user(addr, p)) {
			printk(" (Bad stack address)");
			break;
		}
		if (__kernel_text_address(addr))
			print_ip_sym(addr);
	}
	printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
	raw_show_trace = 1;
	return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	if (!task)
		task = current;

	if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
		show_raw_backtrace(sp);
		return;
	}
	printk("Call Trace:\n");
	do {
		print_ip_sym(pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	} while (pc);
	pr_cont("\n");
}

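/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */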
static void show_stacktrace(struct task_struct *task,
	const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	long stackdata;
	int i;
	unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

	printk("Stack :");
	i = 0;
	while ((unsigned long) sp & (PAGE_SIZE - 1)) {
		if (i && ((i % (64 / field)) == 0)) {
			pr_cont("\n");
			printk("       ");
		}
		if (i > 39) {
			pr_cont(" ...");
			break;
		}

		if (__get_user(stackdata, sp++)) {
			pr_cont(" (Bad stack address)");
			break;
		}

		pr_cont(" %0*lx", field, stackdata);
		i++;
	}
	pr_cont("\n");
	show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	struct pt_regs regs;
	mm_segment_t old_fs = get_fs();

	regs.cp0_status = KSU_KERNEL;
	if (sp) {
		regs.regs[29] = (unsigned long)sp;
		regs.regs[31] = 0;
		regs.cp0_epc = 0;
	} else {
		if (task && task != current) {
			regs.regs[29] = task->thread.reg29;
			regs.regs[31] = 0;
			regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
		} else if (atomic_read(&kgdb_active) != -1 &&
			   kdb_current_regs) {
			memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif
		} else {
			prepare_frametrace(&regs);
		}
	}
	/*
	 * show_stack() deals exclusively with kernel mode, so be sure to
	 * access the stack in the kernel (not user) address space.
	 */
	set_fs(KERNEL_DS);
	show_stacktrace(task, &regs);
	set_fs(old_fs);
}

static void show_code(unsigned int __user *pc)
{
	long i;
	unsigned short __user *pc16 = NULL;

	printk("Code:");

	if ((unsigned long)pc & 1)
		pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
	for(i = -3 ; i < 6 ; i++) {
		unsigned int insn;
		if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
			pr_cont(" (Bad address in epc)\n");
			break;
		}
		pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
	}
	pr_cont("\n");
}

static void __show_regs(const struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int cause = regs->cp0_cause;
	unsigned int exccode;
	int i;

	show_regs_print_info(KERN_DEFAULT);

	/*
	 * Saved main processor registers
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			printk("$%2d   :", i);
		if (i == 0)
			pr_cont(" %0*lx", field, 0UL);
		else if (i == 26 || i == 27)
			pr_cont(" %*s", field, "");
		else
			pr_cont(" %0*lx", field, regs->regs[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

#ifdef CONFIG_CPU_HAS_SMARTMIPS
	printk("Acx    : %0*lx\n", field, regs->acx);
#endif
	printk("Hi    : %0*lx\n", field, regs->hi);
	printk("Lo    : %0*lx\n", field, regs->lo);

	/*
	 * Saved cp0 registers
	 */
	printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
	       (void *) regs->cp0_epc);
	printk("ra    : %0*lx %pS\n", field, regs->regs[31],
	       (void *) regs->regs[31]);

	printk("Status: %08x ", (uint32_t) regs->cp0_status);

	if (cpu_has_3kex) {
		if (regs->cp0_status & ST0_KUO)
			pr_cont("KUo ");
		if (regs->cp0_status & ST0_IEO)
			pr_cont("IEo ");
		if (regs->cp0_status & ST0_KUP)
			pr_cont("KUp ");
		if (regs->cp0_status & ST0_IEP)
			pr_cont("IEp ");
		if (regs->cp0_status & ST0_KUC)
			pr_cont("KUc ");
		if (regs->cp0_status & ST0_IEC)
			pr_cont("IEc ");
	} else if (cpu_has_4kex) {
		if (regs->cp0_status & ST0_KX)
			pr_cont("KX ");
		if (regs->cp0_status & ST0_SX)
			pr_cont("SX ");
		if (regs->cp0_status & ST0_UX)
			pr_cont("UX ");
		switch (regs->cp0_status & ST0_KSU) {
		case KSU_USER:
			pr_cont("USER ");
			break;
		case KSU_SUPERVISOR:
			pr_cont("SUPERVISOR ");
			break;
		case KSU_KERNEL:
			pr_cont("KERNEL ");
			break;
		default:
			pr_cont("BAD_MODE ");
			break;
		}
		if (regs->cp0_status & ST0_ERL)
			pr_cont("ERL ");
		if (regs->cp0_status & ST0_EXL)
			pr_cont("EXL ");
		if (regs->cp0_status & ST0_IE)
			pr_cont("IE ");
	}
	pr_cont("\n");

	exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);

	if (1 <= exccode && exccode <= 5)
		printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

	printk("PrId  : %08x (%s)\n", read_c0_prid(),
	       cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs((struct pt_regs *)regs);
	dump_stack();
}

void show_registers(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	mm_segment_t old_fs = get_fs();

	__show_regs(regs);
	print_modules();
	printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
	       current->comm, current->pid, current_thread_info(), current,
	       field, current_thread_info()->tp_value);
	if (cpu_has_userlocal) {
		unsigned long tls;

		tls = read_c0_userlocal();
		if (tls != current_thread_info()->tp_value)
			printk("*HwTLS: %0*lx\n", field, tls);
	}

	if (!user_mode(regs))
		/* Necessary for getting the correct stack content */
		set_fs(KERNEL_DS);
	show_stacktrace(current, regs);
	show_code((unsigned int __user *) regs->cp0_epc);
	printk("\n");
	set_fs(old_fs);
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
	static int die_counter;
	int sig = SIGSEGV;

	oops_enter();

	if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
		       SIGSEGV) == NOTIFY_STOP)
		sig = 0;

	console_verbose();
	raw_spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s[#%d]:\n", str, ++die_counter);
	show_registers(regs);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);

	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"	.section	__dbe_table, \"a\"\n"
"	.previous			\n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_extable(__start___dbe_table,
			   __stop___dbe_table - __start___dbe_table, addr);
	if (!e)
		e = search_module_dbetables(addr);
	return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	const struct exception_table_entry *fixup = NULL;
	int data = regs->cp0_cause & 4;
	int action = MIPS_BE_FATAL;
	enum ctx_state prev_state;

	prev_state = exception_enter();
	if (data && !user_mode(regs))
		fixup = search_dbe_tables(exception_epc(regs));

	if (fixup)
		action = MIPS_BE_FIXUP;

	if (board_be_handler)
		action = board_be_handler(regs, fixup != NULL);
	else
		mips_cm_error_report();

	switch (action) {
	case MIPS_BE_DISCARD:
		goto out;
	case MIPS_BE_FIXUP:
		if (fixup) {
			regs->cp0_epc = fixup->nextinsn;
			goto out;
		}
		break;
	default:
		break;
	}

	/*
	 * Assume it would be too dangerous to continue ...
	 */
	printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
	       data ? "Data" : "Instruction",
	       field, regs->cp0_epc, field, regs->regs[31]);
	if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
		       SIGBUS) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Oops", regs);
	force_sig(SIGBUS, current);

out:
	exception_exit(prev_state);
}

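/*
 * Field masks for decoding the instructions emulated below
 * (ll/sc, rdhwr and sync, plus their microMIPS encodings).
 */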
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

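/*
 * The ll_bit is cleared by r*_switch.S
 */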
unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long value, __user *vaddr;
	long offset;

	/*
	 * analyse the ll instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

	if ((unsigned long)vaddr & 3)
		return SIGBUS;
	if (get_user(value, vaddr))
		return SIGSEGV;

	preempt_disable();

	if (ll_task == NULL || ll_task == current) {
		ll_bit = 1;
	} else {
		ll_bit = 0;
	}
	ll_task = current;

	preempt_enable();

	regs->regs[(opcode & RT) >> 16] = value;

	return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
	unsigned long __user *vaddr;
	unsigned long reg;
	long offset;

	/*
	 * analyse the sc instruction that just caused a ri exception
	 * and put the referenced address to addr.
	 */

	/* sign extend offset */
	offset = opcode & OFFSET;
	offset <<= 16;
	offset >>= 16;

	vaddr = (unsigned long __user *)
		((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
	reg = (opcode & RT) >> 16;

	if ((unsigned long)vaddr & 3)
		return SIGBUS;

	preempt_disable();

	if (ll_bit == 0 || ll_task != current) {
		regs->regs[reg] = 0;
		preempt_enable();
		return 0;
	}

	preempt_enable();

	if (put_user(regs->regs[reg], vaddr))
		return SIGSEGV;

	regs->regs[reg] = 1;

	return 0;
}

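/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */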
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == LL) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_ll(regs, opcode);
	}
	if ((opcode & OPCODE) == SC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return simulate_sc(regs, opcode);
	}

	return -1;
}

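/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */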
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
	struct thread_info *ti = task_thread_info(current);

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
			1, regs, 0);
	switch (rd) {
	case MIPS_HWR_CPUNUM:
		regs->regs[rt] = smp_processor_id();
		return 0;
	case MIPS_HWR_SYNCISTEP:
		regs->regs[rt] = min(current_cpu_data.dcache.linesz,
				     current_cpu_data.icache.linesz);
		return 0;
	case MIPS_HWR_CC:
		regs->regs[rt] = read_c0_count();
		return 0;
	case MIPS_HWR_CCRES:
		switch (current_cpu_type()) {
		case CPU_20KC:
		case CPU_25KF:
			regs->regs[rt] = 1;
			break;
		default:
			regs->regs[rt] = 2;
		}
		return 0;
	case MIPS_HWR_ULR:
		regs->regs[rt] = ti->tp_value;
		return 0;
	default:
		return -1;
	}
}

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
		int rd = (opcode & RD) >> 11;
		int rt = (opcode & RT) >> 16;

		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
		int rd = (opcode & MM_RS) >> 16;
		int rt = (opcode & MM_RT) >> 21;
		simulate_rdhwr(regs, rd, rt);
		return 0;
	}

	/* Not ours.  */
	return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
				1, regs, 0);
		return 0;
	}

	return -1;
}

asmlinkage void do_ov(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	die_if_kernel("Integer overflow", regs);

	force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc, current);
	exception_exit(prev_state);
}

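/*
 * Deliver SIGFPE for an FPU exception, choosing the si_code from the
 * FCSR Cause bits (assumed to have been masked against the Enable bits
 * already by the caller).
 */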
void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
		     struct task_struct *tsk)
{
	int si_code = FPE_FLTUNK;

	if (fcr31 & FPU_CSR_INV_X)
		si_code = FPE_FLTINV;
	else if (fcr31 & FPU_CSR_DIV_X)
		si_code = FPE_FLTDIV;
	else if (fcr31 & FPU_CSR_OVF_X)
		si_code = FPE_FLTOVF;
	else if (fcr31 & FPU_CSR_UDF_X)
		si_code = FPE_FLTUND;
	else if (fcr31 & FPU_CSR_INE_X)
		si_code = FPE_FLTRES;

	force_sig_fault(SIGFPE, si_code, fault_addr, tsk);
}

int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
	int si_code;
	struct vm_area_struct *vma;

	switch (sig) {
	case 0:
		return 0;

	case SIGFPE:
		force_fcr31_sig(fcr31, fault_addr, current);
		return 1;

	case SIGBUS:
		force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr, current);
		return 1;

	case SIGSEGV:
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)fault_addr);
		if (vma && (vma->vm_start <= (unsigned long)fault_addr))
			si_code = SEGV_ACCERR;
		else
			si_code = SEGV_MAPERR;
		up_read(&current->mm->mmap_sem);
		force_sig_fault(SIGSEGV, si_code, fault_addr, current);
		return 1;

	default:
		force_sig(sig, current);
		return 1;
	}
}

static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
		       unsigned long old_epc, unsigned long old_ra)
{
	union mips_instruction inst = { .word = opcode };
	void __user *fault_addr;
	unsigned long fcr31;
	int sig;

	/* If it's obviously not an FP instruction, skip it */
	switch (inst.i_format.opcode) {
	case cop1_op:
	case cop1x_op:
	case lwc1_op:
	case ldc1_op:
	case swc1_op:
	case sdc1_op:
		break;

	default:
		return -1;
	}

	/*
	 * do_ri skipped over the instruction via compute_return_epc, undo
	 * that for the FPU emulator.
	 */
	regs->cp0_epc = old_epc;
	regs->regs[31] = old_ra;

	/* Save the FP context to struct thread_struct */
	lose_fpu(1);

	/* Run the emulator */
	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
				       &fault_addr);

	/*
	 * We can't allow the emulated instruction to leave any
	 * enabled Cause bits set in $fcr31.
	 */
	fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
	current->thread.fpu.fcr31 &= ~fcr31;

	/* Restore the hardware register state */
	own_fpu(1);

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

	return 0;
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
	enum ctx_state prev_state;
	void __user *fault_addr;
	int sig;

	prev_state = exception_enter();
	if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
		       SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear FCSR.Cause before enabling interrupts */
	write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
	local_irq_enable();

	die_if_kernel("FP exception in kernel code", regs);

	if (fcr31 & FPU_CSR_UNI_X) {
		/*
		 * Unimplemented operation exception.  If we've got the full
		 * software emulator on-board, let's use it...
		 *
		 * Force FPU to dump state into task/thread context.  We're
		 * moving a lot of data here for what is probably a single
		 * instruction, but the alternative is to pre-decode the FP
		 * register operands before invoking the emulator, which seems
		 * a bit extreme for what should be an infrequent event.
		 */
		lose_fpu(1);

		/* Run the emulator */
		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave any
		 * enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Restore the hardware register state */
		own_fpu(1);
	} else {
		sig = SIGFPE;
		fault_addr = (void __user *) regs->cp0_epc;
	}

	/* Send a signal if required.  */
	process_fpemu_return(sig, fault_addr, fcr31);

out:
	exception_exit(prev_state);
}

void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
	const char *str)
{
	char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
			 SIGTRAP) == NOTIFY_STOP)
		return;
#endif

	if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
		       SIGTRAP) == NOTIFY_STOP)
		return;

	/*
	 * The break/trap code tells us what the failure was; dispatch on
	 * it to raise the matching signal.
	 */
	switch (code) {
	case BRK_OVERFLOW:
	case BRK_DIVZERO:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		force_sig_fault(SIGFPE,
				code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
				(void __user *) regs->cp0_epc, current);
		break;
	case BRK_BUG:
		die_if_kernel("Kernel bug detected", regs);
		force_sig(SIGTRAP, current);
		break;
	case BRK_MEMU:
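		/*
		 * Address errors may be deliberately induced by the FPU
		 * emulator to retake control of the CPU after executing the
		 * instruction in the delay slot of an emulated branch.
		 *
		 * Terminate if exception was recognized as a delay slot return
		 * otherwise handle as normal.
		 */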
		if (do_dsemulret(regs))
			return;

		die_if_kernel("Math emu break/trap", regs);
		force_sig(SIGTRAP, current);
		break;
	default:
		scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
		die_if_kernel(b, regs);
		if (si_code) {
			force_sig_fault(SIGTRAP, si_code, NULL, current);
		} else {
			force_sig(SIGTRAP, current);
		}
	}
}

asmlinkage void do_bp(struct pt_regs *regs)
{
	unsigned long epc = msk_isa16_mode(exception_epc(regs));
	unsigned int opcode, bcode;
	enum ctx_state prev_state;
	mm_segment_t seg;

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		u16 instr[2];

		if (__get_user(instr[0], (u16 __user *)epc))
			goto out_sigsegv;

		if (!cpu_has_mmips) {
			/* MIPS16e mode */
			bcode = (instr[0] >> 5) & 0x3f;
		} else if (mm_insn_16bit(instr[0])) {
			/* 16-bit microMIPS BREAK */
			bcode = instr[0] & 0xf;
		} else {
			/* 32-bit microMIPS BREAK */
			if (__get_user(instr[1], (u16 __user *)(epc + 2)))
				goto out_sigsegv;
			opcode = (instr[0] << 16) | instr[1];
			bcode = (opcode >> 6) & ((1 << 20) - 1);
		}
	} else {
		if (__get_user(opcode, (unsigned int __user *)epc))
			goto out_sigsegv;
		bcode = (opcode >> 6) & ((1 << 20) - 1);
	}

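	/*
	 * There is the ancient bug in the MIPS assemblers that the break
	 * code starts left to bit 16 instead to bit 6 in the opcode.
	 * Gas is bug-compatible, but not always, grrr...
	 * We handle both cases with a simple heuristics.  --macro
	 */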
	if (bcode >= (1 << 10))
		bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);

	/*
	 * notify the kprobe handlers, if instruction is likely to
	 * pertain to them.
	 */
	switch (bcode) {
	case BRK_UPROBE:
		if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_UPROBE_XOL:
		if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_BP:
		if (notify_die(DIE_BREAK, "debug", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	case BRK_KPROBE_SSTEPBP:
		if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
			       current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
			goto out;
		else
			break;
	default:
		break;
	}

	do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
	u32 opcode, tcode = 0;
	enum ctx_state prev_state;
	u16 instr[2];
	mm_segment_t seg;
	unsigned long epc = msk_isa16_mode(exception_epc(regs));

	seg = get_fs();
	if (!user_mode(regs))
		set_fs(get_ds());

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (get_isa16_mode(regs->cp0_epc)) {
		if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
		    __get_user(instr[1], (u16 __user *)(epc + 2)))
			goto out_sigsegv;
		opcode = (instr[0] << 16) | instr[1];
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 12) & ((1 << 4) - 1);
	} else {
		if (__get_user(opcode, (u32 __user *)epc))
			goto out_sigsegv;
		/* Immediate versions don't provide a code.  */
		if (!(opcode & OPCODE))
			tcode = (opcode >> 6) & ((1 << 10) - 1);
	}

	do_trap_or_bp(regs, tcode, 0, "Trap");

out:
	set_fs(seg);
	exception_exit(prev_state);
	return;

out_sigsegv:
	force_sig(SIGSEGV, current);
	goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
	unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
	unsigned long old_epc = regs->cp0_epc;
	unsigned long old31 = regs->regs[31];
	enum ctx_state prev_state;
	unsigned int opcode = 0;
	int status = -1;

	/*
	 * Avoid any kernel code.  Just emulate the R2 instruction
	 * as quickly as possible.
	 */
	if (mipsr2_emulation && cpu_has_mips_r6 &&
	    likely(user_mode(regs)) &&
	    likely(get_user(opcode, epc) >= 0)) {
		unsigned long fcr31 = 0;

		status = mipsr2_decoder(regs, opcode, &fcr31);
		switch (status) {
		case 0:
		case SIGEMT:
			return;
		case SIGILL:
			goto no_r2_instr;
		default:
			process_fpemu_return(status,
					     &current->thread.cp0_baduaddr,
					     fcr31);
			return;
		}
	}

no_r2_instr:

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;

	if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
		       SIGILL) == NOTIFY_STOP)
		goto out;

	die_if_kernel("Reserved instruction in kernel code", regs);

	if (unlikely(compute_return_epc(regs) < 0))
		goto out;

	if (!get_isa16_mode(regs->cp0_epc)) {
		if (unlikely(get_user(opcode, epc) < 0))
			status = SIGSEGV;

		if (!cpu_has_llsc && status < 0)
			status = simulate_llsc(regs, opcode);

		if (status < 0)
			status = simulate_rdhwr_normal(regs, opcode);

		if (status < 0)
			status = simulate_sync(regs, opcode);

		if (status < 0)
			status = simulate_fp(regs, opcode, old_epc, old31);
	} else if (cpu_has_mmips) {
		unsigned short mmop[2] = { 0 };

		if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
			status = SIGSEGV;
		if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
			status = SIGSEGV;
		opcode = mmop[0];
		opcode = (opcode << 16) | mmop[1];

		if (status < 0)
			status = simulate_rdhwr_mm(regs, opcode);
	}

	if (status < 0)
		status = SIGILL;

	if (unlikely(status > 0)) {
		regs->cp0_epc = old_epc;		/* Undo skip-over.  */
		regs->regs[31] = old31;
		force_sig(status, current);
	}

out:
	exception_exit(prev_state);
}

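/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold amount of FPU operations, force migration
 * to a "CPU" that has FP support.
 */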
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
	if (mt_fpemul_threshold > 0 &&
	    ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
		/*
		 * If there's no FPU present, or if the application has
		 * already restricted the allowed set to exclude any CPUs
		 * with FPUs, we'll skip the procedure.
		 */
		if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
			cpumask_t tmask;

			current->thread.user_cpus_allowed
				= current->cpus_allowed;
			cpumask_and(&tmask, &current->cpus_allowed,
				    &mt_fpu_cpumask);
			set_cpus_allowed_ptr(current, &tmask);
			set_thread_flag(TIF_FPUBOUND);
		}
	}
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
	return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
	void *data)
{
	struct pt_regs *regs = data;

	die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
			      "instruction", regs);
	force_sig(SIGILL, current);

	return NOTIFY_OK;
}

static int enable_restore_fp_context(int msa)
{
	int err, was_fpu_owner, prior_msa;

	/*
	 * If an FP mode switch is currently underway, wait for it to
	 * complete before proceeding.
	 */
	wait_var_event(&current->mm->context.fp_mode_switching,
		       !atomic_read(&current->mm->context.fp_mode_switching));

	if (!used_math()) {
		/* First time FP context user. */
		preempt_disable();
		err = init_fpu();
		if (msa && !err) {
			enable_msa();
			init_msa_upper();
			set_thread_flag(TIF_USEDMSA);
			set_thread_flag(TIF_MSA_CTX_LIVE);
		}
		preempt_enable();
		if (!err)
			set_used_math();
		return err;
	}

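	/*
	 * This task has formerly used the FP context.
	 *
	 * If this thread has no live MSA vector context then we can simply
	 * restore the scalar FP context. If it has live MSA vector context
	 * (either because the first time it used FP was via MSA, or through
	 * a vector exception) then we need to restore the vector context,
	 * which implies restoring the scalar FP context too.
	 */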
	if (!msa && !thread_msa_context_live())
		return own_fpu(1);

	/*
	 * This task is using or has previously used MSA. Thus we require
	 * that Status.FR == 1.
	 */
	preempt_disable();
	was_fpu_owner = is_fpu_owner();
	err = own_fpu_inatomic(0);
	if (err)
		goto out;

	enable_msa();
	write_msa_csr(current->thread.fpu.msacsr);
	set_thread_flag(TIF_USEDMSA);

	/*
	 * If this is the first time that the task is using MSA and it has
	 * previously used scalar FP in this time slice then we already have
	 * FP context which we shouldn't clobber. We do however need to clear
	 * the upper 64b of each vector register so that this task has no
	 * opportunity to see data left behind by another.
	 */
	prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
	if (!prior_msa && was_fpu_owner) {
		init_msa_upper();

		goto out;
	}

	if (!prior_msa) {
		/*
		 * Restore the least significant 64b of each vector register
		 * from the existing scalar FP context.
		 */
		_restore_fp(current);

		/*
		 * The task has not formerly used MSA, so clear the upper 64b
		 * of each vector register such that it cannot see data left
		 * behind by another task.
		 */
		init_msa_upper();
	} else {
		/* We need to restore the vector context. */
		restore_msa(current);

		/* Restore the scalar FP control & status register */
		if (!was_fpu_owner)
			write_32bit_cp1_register(CP1_STATUS,
						 current->thread.fpu.fcr31);
	}

out:
	preempt_enable();

	return 0;
}

asmlinkage void do_cpu(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	unsigned int __user *epc;
	unsigned long old_epc, old31;
	void __user *fault_addr;
	unsigned int opcode;
	unsigned long fcr31;
	unsigned int cpid;
	int status, err;
	int sig;

	prev_state = exception_enter();
	cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

	if (cpid != 2)
		die_if_kernel("do_cpu invoked from kernel context!", regs);

	switch (cpid) {
	case 0:
		epc = (unsigned int __user *)exception_epc(regs);
		old_epc = regs->cp0_epc;
		old31 = regs->regs[31];
		opcode = 0;
		status = -1;

		if (unlikely(compute_return_epc(regs) < 0))
			break;

		if (!get_isa16_mode(regs->cp0_epc)) {
			if (unlikely(get_user(opcode, epc) < 0))
				status = SIGSEGV;

			if (!cpu_has_llsc && status < 0)
				status = simulate_llsc(regs, opcode);
		}

		if (status < 0)
			status = SIGILL;

		if (unlikely(status > 0)) {
			regs->cp0_epc = old_epc;	/* Undo skip-over.  */
			regs->regs[31] = old31;
			force_sig(status, current);
		}

		break;

	case 3:
		/*
		 * The COP3 opcode space was removed as of the MIPS III ISA;
		 * from MIPS IV onwards it is used for COP1X instructions.
		 * Send SIGILL unless this is a COP1X-capable FPU-less
		 * configuration, in which case fall through to the CU1
		 * emulation path below.
		 */
		if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
			force_sig(SIGILL, current);
			break;
		}
		/* Fall through.  */

	case 1:
		err = enable_restore_fp_context(0);

		if (raw_cpu_has_fpu && !err)
			break;

		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
					       &fault_addr);

		/*
		 * We can't allow the emulated instruction to leave
		 * any enabled Cause bits set in $fcr31.
		 */
		fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
		current->thread.fpu.fcr31 &= ~fcr31;

		/* Send a signal if required.  */
		if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
			mt_ase_fp_affinity();

		break;

	case 2:
		raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
		break;
	}

	exception_exit(prev_state);
}

asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
	if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
		       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
		goto out;

	/* Clear MSACSR.Cause before enabling interrupts */
	write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
	local_irq_enable();

	die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
	force_sig(SIGFPE, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_msa(struct pt_regs *regs)
{
	enum ctx_state prev_state;
	int err;

	prev_state = exception_enter();

	if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
		force_sig(SIGILL, current);
		goto out;
	}

	die_if_kernel("do_msa invoked from kernel context!", regs);

	err = enable_restore_fp_context(1);
	if (err)
		force_sig(SIGILL, current);
out:
	exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	force_sig(SIGILL, current);
	exception_exit(prev_state);
}

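/*
 * Called with interrupts disabled.
 */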
asmlinkage void do_watch(struct pt_regs *regs)
{
	enum ctx_state prev_state;

	prev_state = exception_enter();
	/*
	 * Clear WP (bit 22) bit of cause register so we don't loop
	 * forever.
	 */
	clear_c0_cause(CAUSEF_WP);

	/*
	 * If the current thread has the watch registers loaded, save
	 * their values and send SIGTRAP.  Otherwise another thread
	 * left the registers set, clear them and continue.
	 */
	if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
		mips_read_watch_registers();
		local_irq_enable();
		force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL, current);
	} else {
		mips_clear_watch_registers();
		local_irq_enable();
	}
	exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
	int multi_match = regs->cp0_status & ST0_TS;
	enum ctx_state prev_state;
	mm_segment_t old_fs = get_fs();

	prev_state = exception_enter();
	show_regs(regs);

	if (multi_match) {
		dump_tlb_regs();
		pr_info("\n");
		dump_tlb_all();
	}

	if (!user_mode(regs))
		set_fs(KERNEL_DS);

	show_code((unsigned int __user *) regs->cp0_epc);

	set_fs(old_fs);

	/*
	 * Some chips may have other causes of machine check (e.g. SB1
	 * graduation timer)
	 */
	panic("Caught Machine Check exception - %scaused by multiple "
	      "matching entries in the TLB.",
	      (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
	int subcode;

	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
			>> VPECONTROL_EXCPT_SHIFT;
	switch (subcode) {
	case 0:
		printk(KERN_DEBUG "Thread Underflow\n");
		break;
	case 1:
		printk(KERN_DEBUG "Thread Overflow\n");
		break;
	case 2:
		printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
		break;
	case 3:
		printk(KERN_DEBUG "Gating Storage Exception\n");
		break;
	case 4:
		printk(KERN_DEBUG "YIELD Scheduler Exception\n");
		break;
	case 5:
		printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
		break;
	default:
		printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
			subcode);
		break;
	}
	die_if_kernel("MIPS MT Thread exception in kernel", regs);

	force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
	if (cpu_has_dsp)
		panic("Unexpected DSP exception");

	force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
	/*
	 * Game over - no way to handle this if it ever occurs.	 Most probably
	 * caused by a new unknown cpu type or after another deadly
	 * hard/software error.
	 */
	show_regs(regs);
	panic("Caught reserved exception %ld - should not happen.",
	      (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
	l1parity = 0;
	return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
	l2parity = 0;
	return 1;
}
__setup("nol2par", nol2parity);

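/*
 * Some MIPS CPUs can enable/disable parity/ECC protection on the L1 and
 * L2 caches; probe what is supported and honour the nol1par/nol2par
 * command-line overrides.
 */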
static inline void parity_protection_init(void)
{
#define ERRCTL_PE	0x80000000
#define ERRCTL_L2P	0x00800000

	if (mips_cm_revision() >= CM_REV_CM3) {
		ulong gcr_ectl, cp0_ectl;

		/*
		 * With CM3 systems we need to ensure that the L1 & L2
		 * parity enables are set to the same value, since this
		 * is presumed by the hardware engineers.
		 *
		 * If the user disabled either of L1 or L2 ECC checking,
		 * disable both.
		 */
		l1parity &= l2parity;
		l2parity &= l1parity;

		/* Probe L1 ECC support */
		cp0_ectl = read_c0_ecc();
		write_c0_ecc(cp0_ectl | ERRCTL_PE);
		back_to_back_c0_hazard();
		cp0_ectl = read_c0_ecc();

		/* Probe L2 ECC support */
		gcr_ectl = read_gcr_err_control();

		if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
		    !(cp0_ectl & ERRCTL_PE)) {
			/*
			 * One of L1 or L2 ECC checking isn't supported,
			 * so we cannot enable either.
			 */
			l1parity = l2parity = 0;
		}

		/* Configure L1 ECC checking */
		if (l1parity)
			cp0_ectl |= ERRCTL_PE;
		else
			cp0_ectl &= ~ERRCTL_PE;
		write_c0_ecc(cp0_ectl);
		back_to_back_c0_hazard();
		WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);

		/* Configure L2 ECC checking */
		if (l2parity)
			gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		else
			gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
		write_gcr_err_control(gcr_ectl);
		gcr_ectl = read_gcr_err_control();
		gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
		WARN_ON(!!gcr_ectl != l2parity);

		pr_info("Cache parity protection %sabled\n",
			l1parity ? "en" : "dis");
		return;
	}

	switch (current_cpu_type()) {
	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
	case CPU_1074K:
	case CPU_INTERAPTIV:
	case CPU_PROAPTIV:
	case CPU_P5600:
	case CPU_QEMU_GENERIC:
	case CPU_P6600:
		{
			unsigned long errctl;
			unsigned int l1parity_present, l2parity_present;

			errctl = read_c0_ecc();
			errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

			/* probe L1 parity support */
			write_c0_ecc(errctl | ERRCTL_PE);
			back_to_back_c0_hazard();
			l1parity_present = (read_c0_ecc() & ERRCTL_PE);

			/* probe L2 parity support */
			write_c0_ecc(errctl|ERRCTL_L2P);
			back_to_back_c0_hazard();
			l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

			if (l1parity_present && l2parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
				if (l1parity ^ l2parity)
					errctl |= ERRCTL_L2P;
			} else if (l1parity_present) {
				if (l1parity)
					errctl |= ERRCTL_PE;
			} else if (l2parity_present) {
				if (l2parity)
					errctl |= ERRCTL_L2P;
			} else {
				/* No parity available */
			}

			printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

			write_c0_ecc(errctl);
			back_to_back_c0_hazard();
			errctl = read_c0_ecc();
			printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

			if (l1parity_present)
				printk(KERN_INFO "Cache parity protection %sabled\n",
				       (errctl & ERRCTL_PE) ? "en" : "dis");

			if (l2parity_present) {
				if (l1parity_present && l1parity)
					errctl ^= ERRCTL_L2P;
				printk(KERN_INFO "L2 cache parity protection %sabled\n",
				       (errctl & ERRCTL_L2P) ? "en" : "dis");
			}
		}
		break;

	case CPU_5KC:
	case CPU_5KE:
	case CPU_LOONGSON1:
		write_c0_ecc(0x80000000);
		back_to_back_c0_hazard();
		/* Set the PE bit (bit 31) in the c0_errctl register. */
		printk(KERN_INFO "Cache parity protection %sabled\n",
		       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
		break;
	case CPU_20KC:
	case CPU_25KF:
		/* Clear the DE bit (bit 16) in the c0_status register. */
		printk(KERN_INFO "Enable cache parity protection for "
		       "MIPS 20KC/25KF CPUs.\n");
		clear_c0_status(ST0_DE);
		break;
	default:
		break;
	}
}

asmlinkage void cache_parity_error(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	printk("Cache error exception:\n");
	printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
	reg_val = read_c0_cacheerr();
	printk("c0_cacheerr == %08x\n", reg_val);

	printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
	       reg_val & (1<<30) ? "secondary" : "primary",
	       reg_val & (1<<31) ? "data" : "insn");
	if ((cpu_has_mips_r2_r6) &&
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<27) ? "ES " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	} else {
		pr_err("Error bits: %s%s%s%s%s%s%s\n",
			reg_val & (1<<29) ? "ED " : "",
			reg_val & (1<<28) ? "ET " : "",
			reg_val & (1<<26) ? "EE " : "",
			reg_val & (1<<25) ? "EB " : "",
			reg_val & (1<<24) ? "EI " : "",
			reg_val & (1<<23) ? "E1 " : "",
			reg_val & (1<<22) ? "E0 " : "");
	}
	printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
	if (reg_val & (1<<22))
		printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

	if (reg_val & (1<<23))
		printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

	panic("Can't handle the cache error!");
}

asmlinkage void do_ftlb(void)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned int reg_val;

	/* For the moment, report the problem and hang. */
	if ((cpu_has_mips_r2_r6) &&
	    (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
		       read_c0_ecc());
		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
		reg_val = read_c0_cacheerr();
		pr_err("c0_cacheerr == %08x\n", reg_val);

		if ((reg_val & 0xc0000000) == 0xc0000000) {
			pr_err("Decoded c0_cacheerr: FTLB parity error\n");
		} else {
			pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
			       reg_val & (1<<30) ? "secondary" : "primary",
			       reg_val & (1<<31) ? "data" : "insn");
		}
	} else {
		pr_err("FTLB error exception\n");
	}
	/* Just print the cacheerr bits for now */
	cache_parity_error();
}

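/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */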
void ejtag_exception_handler(struct pt_regs *regs)
{
	const int field = 2 * sizeof(unsigned long);
	unsigned long depc, old_epc, old_ra;
	unsigned int debug;

	printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
	depc = read_c0_depc();
	debug = read_c0_debug();
	printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
	if (debug & 0x80000000) {
		/*
		 * In branch delay slot.
		 * We cheat a little bit here and use EPC to calculate the
		 * debug return address (DEPC). EPC is restored after the
		 * calculation.
		 */
		old_epc = regs->cp0_epc;
		old_ra = regs->regs[31];
		regs->cp0_epc = depc;
		compute_return_epc(regs);
		depc = regs->cp0_epc;
		regs->cp0_epc = old_epc;
		regs->regs[31] = old_ra;
	} else
		depc += 4;
	write_c0_depc(depc);

#if 0
	printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
	write_c0_debug(debug | 0x100);
#endif
}

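/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */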
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
	return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
	char str[100];

	nmi_enter();
	raw_notifier_call_chain(&nmi_chain, 0, regs);
	bust_spinlocks(1);
	snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
		 smp_processor_id(), regs->cp0_epc);
	regs->cp0_epc = read_c0_errorepc();
	die(str, regs);
	nmi_exit();
}

#define VECTORSPACING 0x100	/* for EI/VI mode */

unsigned long ebase;
EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
	unsigned long handler = (unsigned long) addr;
	unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
	/*
	 * Only the TLB handlers are cache aligned with an even
	 * address. All other handlers are on an odd address and
	 * require no modification. Otherwise, MIPS32 mode will
	 * be entered when handling any TLB exceptions. That
	 * would be bad...since we must stay in microMIPS mode.
	 */
	if (!(handler & 0x1))
		handler |= 1;
#endif
	old_handler = xchg(&exception_handlers[n], handler);

	if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
		unsigned long jump_mask = ~((1 << 27) - 1);
#else
		unsigned long jump_mask = ~((1 << 28) - 1);
#endif
		u32 *buf = (u32 *)(ebase + 0x200);
		unsigned int k0 = 26;
		if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
			uasm_i_j(&buf, handler & ~jump_mask);
			uasm_i_nop(&buf);
		} else {
			UASM_i_LA(&buf, k0, handler);
			uasm_i_jr(&buf, k0);
			uasm_i_nop(&buf);
		}
		local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
	}
	return (void *)old_handler;
}

static void do_default_vi(void)
{
	show_regs(get_irq_regs());
	panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
	unsigned long handler;
	unsigned long old_handler = vi_handlers[n];
	int srssets = current_cpu_data.srsets;
	u16 *h;
	unsigned char *b;

	BUG_ON(!cpu_has_veic && !cpu_has_vint);

	if (addr == NULL) {
		handler = (unsigned long) do_default_vi;
		srs = 0;
	} else
		handler = (unsigned long) addr;
	vi_handlers[n] = handler;

	b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

	if (srs >= srssets)
		panic("Shadow register set %d not supported", srs);

	if (cpu_has_veic) {
		if (board_bind_eic_interrupt)
			board_bind_eic_interrupt(n, srs);
	} else if (cpu_has_vint) {
		/* SRSMap is only defined if shadow sets are implemented */
		if (srssets > 1)
			change_c0_srsmap(0xf << n*4, srs << n*4);
	}

	if (srs == 0) {
		/*
		 * If no shadow set is selected then use the default handler
		 * that does normal register saving and standard interrupt exit
		 */
		extern char except_vec_vi, except_vec_vi_lui;
		extern char except_vec_vi_ori, except_vec_vi_end;
		extern char rollback_except_vec_vi;
		char *vec_start = using_rollback_handler() ?
			&rollback_except_vec_vi : &except_vec_vi;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
		const int lui_offset = &except_vec_vi_lui - vec_start + 2;
		const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
		const int lui_offset = &except_vec_vi_lui - vec_start;
		const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
		const int handler_len = &except_vec_vi_end - vec_start;

		if (handler_len > VECTORSPACING) {
			/*
			 * Sigh... panicing won't help as the console
			 * is probably not configured :(
			 */
			panic("VECTORSPACING too small");
		}

		set_handler(((unsigned long)b - ebase), vec_start,
#ifdef CONFIG_CPU_MICROMIPS
				(handler_len - 1));
#else
				handler_len);
#endif
		h = (u16 *)(b + lui_offset);
		*h = (handler >> 16) & 0xffff;
		h = (u16 *)(b + ori_offset);
		*h = (handler & 0xffff);
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+handler_len));
	} else {
		/*
		 * In other cases jump directly to the interrupt handler. It
		 * is the handler's responsibility to save registers if required
		 * (eg hi/lo) and return from the exception using "eret".
		 */
		u32 insn;

		h = (u16 *)b;
		/* j handler */
#ifdef CONFIG_CPU_MICROMIPS
		insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
#else
		insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
#endif
		h[0] = (insn >> 16) & 0xffff;
		h[1] = insn & 0xffff;
		h[2] = 0;
		h[3] = 0;
		local_flush_icache_range((unsigned long)b,
					 (unsigned long)(b+8));
	}

	return (void *)old_handler;
}

void *set_vi_handler(int n, vi_handler_t addr)
{
	return set_vi_srs_handler(n, addr, 0);
}

extern void tlb_init(void);

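/*
 * Timer interrupt
 */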
int cp0_compare_irq;
EXPORT_SYMBOL_GPL(cp0_compare_irq);
int cp0_compare_irq_shift;

/*
 * Performance counter IRQ or -1 if shared with timer
 */
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);

/*
 * Fast debug channel IRQ or -1 if not present
 */
int cp0_fdc_irq;
EXPORT_SYMBOL_GPL(cp0_fdc_irq);

static int noulri;

static int __init ulri_disable(char *s)
{
	pr_info("Disabling ulri\n");
	noulri = 1;

	return 1;
}
__setup("noulri", ulri_disable);

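/* configure STATUS register */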
static void configure_status(void)
{
	/*
	 * Disable coprocessors and select 32-bit or 64-bit addressing
	 * and the 16/32 or 32/32 FPR register model.  Reset the BEV
	 * flag that some firmware may have left set and the TS bit (for
	 * IP27).  Set XX for ISA IV code to work.
	 */
	unsigned int status_set = ST0_CU0;
#ifdef CONFIG_64BIT
	status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
	if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
		status_set |= ST0_XX;
	if (cpu_has_dsp)
		status_set |= ST0_MX;

	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
			 status_set);
}

unsigned int hwrena;
EXPORT_SYMBOL_GPL(hwrena);

/* configure HWRENA register */
static void configure_hwrena(void)
{
	hwrena = cpu_hwrena_impl_bits;

	if (cpu_has_mips_r2_r6)
		hwrena |= MIPS_HWRENA_CPUNUM |
			  MIPS_HWRENA_SYNCISTEP |
			  MIPS_HWRENA_CC |
			  MIPS_HWRENA_CCRES;

	if (!noulri && cpu_has_userlocal)
		hwrena |= MIPS_HWRENA_ULR;

	if (hwrena)
		write_c0_hwrena(hwrena);
}

static void configure_exception_vector(void)
{
	if (cpu_has_veic || cpu_has_vint) {
		unsigned long sr = set_c0_status(ST0_BEV);

		/* If available, use WG to set top bits of EBASE */
		if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
			write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
			write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
		}
		write_c0_ebase(ebase);
		write_c0_status(sr);
		/* Setting vector spacing enables EI/VI mode  */
		change_c0_intctl(0x3e0, VECTORSPACING);
	}
	if (cpu_has_divec) {
		if (cpu_has_mipsmt) {
			unsigned int vpflags = dvpe();
			set_c0_cause(CAUSEF_IV);
			evpe(vpflags);
		} else
			set_c0_cause(CAUSEF_IV);
	}
}

void per_cpu_trap_init(bool is_boot_cpu)
{
	unsigned int cpu = smp_processor_id();

	configure_status();
	configure_hwrena();

	configure_exception_vector();

	/*
	 * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
	 *
	 *  o read IntCtl.IPTI to determine the timer interrupt
	 *  o read IntCtl.IPPCI to determine the performance counter interrupt
	 *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
	 */
	if (cpu_has_mips_r2_r6) {
		/*
		 * We shouldn't trust a secondary core has a sane EBASE register
		 * so use the one calculated by the boot CPU.
		 */
		if (!is_boot_cpu) {
			/* If available, use WG to set top bits of EBASE */
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				write_c0_ebase_64(ebase | MIPS_EBASE_WG);
#else
				write_c0_ebase(ebase | MIPS_EBASE_WG);
#endif
			}
			write_c0_ebase(ebase);
		}

		cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
		cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
		cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
		cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
		if (!cp0_fdc_irq)
			cp0_fdc_irq = -1;

	} else {
		cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
		cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
		cp0_perfcount_irq = -1;
		cp0_fdc_irq = -1;
	}

	if (!cpu_data[cpu].asid_cache)
		cpu_data[cpu].asid_cache = asid_first_version(cpu);

	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	/* Boot CPU's cache setup in setup_arch().  */
	if (!is_boot_cpu)
		cpu_cache_init();
	tlb_init();
	TLBMISS_HANDLER_SETUP();
}

/* Install CPU exception handler */
void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
	memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
#else
	memcpy((void *)(ebase + offset), addr, size);
#endif
	local_flush_icache_range(ebase + offset, ebase + offset + size);
}

static const char panic_null_cerr[] =
	"Trying to set NULL cache error exception handler\n";

/*
 * Install uncached CPU exception handler.
 * This is suitable only for the cache error exception which is the only
 * exception handler that is being run uncached.
 */
void set_uncached_handler(unsigned long offset, void *addr,
	unsigned long size)
{
	unsigned long uncached_ebase = CKSEG1ADDR(ebase);

	if (!addr)
		panic(panic_null_cerr);

	memcpy((void *)(uncached_ebase + offset), addr, size);
}

static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
	rdhwr_noopt = 1;
	return 1;
}

__setup("rdhwr_noopt", set_rdhwr_noopt);

void __init trap_init(void)
{
	extern char except_vec3_generic;
	extern char except_vec4;
	extern char except_vec3_r4000;
	unsigned long i;

	check_wait();

	if (cpu_has_veic || cpu_has_vint) {
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;

		ebase = (unsigned long)
			__alloc_bootmem(size, 1 << fls(size), 0);

		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
		 *
		 * It shouldn't generally be in XKPhys on MIPS64 to avoid
		 * hitting a poorly defined exception base for Cache Errors.
		 * The allocation is likely to be in the low 512MB of physical,
		 * in which case we should be able to convert to KSeg0.
		 *
		 * EVA is special though as it allows segments to be rearranged
		 * and to become uncached during cache error handling.
		 */
		ebase_pa = __pa(ebase);
		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
			ebase = CKSEG0ADDR(ebase_pa);
	} else {
		ebase = CAC_BASE;

		if (cpu_has_mips_r2_r6) {
			if (cpu_has_ebase_wg) {
#ifdef CONFIG_64BIT
				ebase = (read_c0_ebase_64() & ~0xfff);
#else
				ebase = (read_c0_ebase() & ~0xfff);
#endif
			} else {
				ebase += (read_c0_ebase() & 0x3ffff000);
			}
		}
	}

	if (cpu_has_mmips) {
		unsigned int config3 = read_c0_config3();

		if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
			write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
		else
			write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
	}

	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);

	/*
	 * Copy the generic exception handlers to their final destination.
	 * This will be overridden later as suitable for a particular
	 * configuration.
	 */
	set_handler(0x180, &except_vec3_generic, 0x80);

	/*
	 * Setup default vectors
	 */
	for (i = 0; i <= 31; i++)
		set_except_vector(i, handle_reserved);

	/*
	 * Copy the EJTAG debug exception vector handler code to its final
	 * destination.
	 */
	if (cpu_has_ejtag && board_ejtag_handler_setup)
		board_ejtag_handler_setup();

	/*
	 * Only some CPUs have the watch exceptions.
	 */
	if (cpu_has_watch)
		set_except_vector(EXCCODE_WATCH, handle_watch);

	/*
	 * Initialise interrupt handlers
	 */
	if (cpu_has_veic || cpu_has_vint) {
		int nvec = cpu_has_veic ? 64 : 8;
		for (i = 0; i < nvec; i++)
			set_vi_handler(i, NULL);
	}
	else if (cpu_has_divec)
		set_handler(0x200, &except_vec4, 0x8);

	/*
	 * Some CPUs can enable/disable cache parity detection, but they
	 * do it in different ways.
	 */
	parity_protection_init();

	/*
	 * The Data Bus Errors / Instruction Bus Errors are signaled
	 * by external hardware.  Therefore these two exceptions
	 * may have board specific handlers.
	 */
	if (board_be_init)
		board_be_init();

	set_except_vector(EXCCODE_INT, using_rollback_handler() ?
					rollback_handle_int : handle_int);
	set_except_vector(EXCCODE_MOD, handle_tlbm);
	set_except_vector(EXCCODE_TLBL, handle_tlbl);
	set_except_vector(EXCCODE_TLBS, handle_tlbs);

	set_except_vector(EXCCODE_ADEL, handle_adel);
	set_except_vector(EXCCODE_ADES, handle_ades);

	set_except_vector(EXCCODE_IBE, handle_ibe);
	set_except_vector(EXCCODE_DBE, handle_dbe);

	set_except_vector(EXCCODE_SYS, handle_sys);
	set_except_vector(EXCCODE_BP, handle_bp);

	if (rdhwr_noopt)
		set_except_vector(EXCCODE_RI, handle_ri);
	else {
		if (cpu_has_vtag_icache)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else if (current_cpu_type() == CPU_LOONGSON3)
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
		else
			set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
	}

	set_except_vector(EXCCODE_CPU, handle_cpu);
	set_except_vector(EXCCODE_OV, handle_ov);
	set_except_vector(EXCCODE_TR, handle_tr);
	set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);

	if (board_nmi_handler_setup)
		board_nmi_handler_setup();

	if (cpu_has_fpu && !cpu_has_nofpuex)
		set_except_vector(EXCCODE_FPE, handle_fpe);

	set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);

	if (cpu_has_rixiex) {
		set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
		set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
	}

	set_except_vector(EXCCODE_MSADIS, handle_msa);
	set_except_vector(EXCCODE_MDMX, handle_mdmx);

	if (cpu_has_mcheck)
		set_except_vector(EXCCODE_MCHECK, handle_mcheck);

	if (cpu_has_mipsmt)
		set_except_vector(EXCCODE_THREAD, handle_mt);

	set_except_vector(EXCCODE_DSPDIS, handle_dsp);

	if (board_cache_error_setup)
		board_cache_error_setup();

	if (cpu_has_vce)
		/* Special exception: R4[04]00 uses also the divec space. */
		set_handler(0x180, &except_vec3_r4000, 0x100);
	else if (cpu_has_4kex)
		set_handler(0x180, &except_vec3_generic, 0x80);
	else
		set_handler(0x080, &except_vec3_generic, 0x80);

	local_flush_icache_range(ebase, ebase + 0x400);

	sort_extable(__start___dbe_table, __stop___dbe_table);

	cu2_notifier(default_cu2_call, 0x80000000);	/* Run last  */
}

static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
			    void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		configure_status();
		configure_hwrena();
		configure_exception_vector();

		/* Restore register with CPU number for TLB handlers */
		TLBMISS_HANDLER_RESTORE();

		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block trap_pm_notifier_block = {
	.notifier_call = trap_pm_notifier,
};

static int __init trap_pm_init(void)
{
	return cpu_pm_register_notifier(&trap_pm_notifier_block);
}
arch_initcall(trap_pm_init);