linux/arch/mips/kernel/traps.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/kexec.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/idle.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>

extern void check_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern u32 handle_tlbl[];
extern u32 handle_tlbs[];
extern u32 handle_tlbm[];
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
void (*board_ebase_setup)(void);
void (*board_cache_error_setup)(void);
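
/*
 * Illustrative only: platform setup code assigns these hooks before the
 * corresponding exceptions can occur, e.g. (hypothetical handler name)
 *
 *      board_be_handler = my_board_be_handler;
 *
 * so that do_be() below can give the board first say on bus errors.
 */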

static void show_raw_backtrace(unsigned long reg29)
{
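        /*
         * reg29 is the saved MIPS stack pointer ($29/$sp); clear the low
         * two bits so the stack walk below starts word aligned.
         */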
        unsigned long *sp = (unsigned long *)(reg29 & ~3);
        unsigned long addr;

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif
        while (!kstack_end(sp)) {
                unsigned long __user *p =
                        (unsigned long __user *)(unsigned long)sp++;
                if (__get_user(addr, p)) {
                        printk(" (Bad stack address)");
                        break;
                }
                if (__kernel_text_address(addr))
                        print_ip_sym(addr);
        }
        printk("\n");
}

#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
        raw_show_trace = 1;
        return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif

static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
        unsigned long sp = regs->regs[29];
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        if (!task)
                task = current;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
        }
        printk("Call Trace:\n");
        do {
                print_ip_sym(pc);
                pc = unwind_stack(task, &sp, pc, &ra);
        } while (pc);
        printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task,
        const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        long stackdata;
        int i;
        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];

        printk("Stack :");
        i = 0;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
                        printk("\n       ");
                if (i > 39) {
                        printk(" ...");
                        break;
                }

                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                        break;
                }

                printk(" %0*lx", field, stackdata);
                i++;
        }
        printk("\n");
        show_backtrace(task, regs);
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
        struct pt_regs regs;
        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
                regs.cp0_epc = 0;
        } else {
                if (task && task != current) {
                        regs.regs[29] = task->thread.reg29;
                        regs.regs[31] = 0;
                        regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
                } else if (atomic_read(&kgdb_active) != -1 &&
                           kdb_current_regs) {
                        memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
                } else {
                        prepare_frametrace(&regs);
                }
        }
        show_stacktrace(task, &regs);
}

static void show_code(unsigned int __user *pc)
{
        long i;
        unsigned short __user *pc16 = NULL;

        printk("\nCode:");

        if ((unsigned long)pc & 1)
                pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
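        /*
         * An odd EPC means the faulting code was MIPS16e/microMIPS, so
         * dump 16-bit halfwords from the even base address rather than
         * 32-bit words.
         */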
        for (i = -3 ; i < 6 ; i++) {
                unsigned int insn;
                if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                        break;
                }
                printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
        }
}

static void __show_regs(const struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;
        int i;

        show_regs_print_info(KERN_DEFAULT);

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                if ((i % 4) == 0)
                        printk("$%2d   :", i);
                if (i == 0)
                        printk(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        printk(" %*s", field, "");
                else
                        printk(" %0*lx", field, regs->regs[i]);

                i++;
                if ((i % 4) == 0)
                        printk("\n");
        }

#ifdef CONFIG_CPU_HAS_SMARTMIPS
        printk("Acx    : %0*lx\n", field, regs->acx);
#endif
        printk("Hi    : %0*lx\n", field, regs->hi);
        printk("Lo    : %0*lx\n", field, regs->lo);

        /*
         * Saved cp0 registers
         */
        printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
               (void *) regs->cp0_epc);
        printk("    %s\n", print_tainted());
        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
               (void *) regs->regs[31]);

        printk("Status: %08x    ", (uint32_t) regs->cp0_status);

        if (cpu_has_3kex) {
                if (regs->cp0_status & ST0_KUO)
                        printk("KUo ");
                if (regs->cp0_status & ST0_IEO)
                        printk("IEo ");
                if (regs->cp0_status & ST0_KUP)
                        printk("KUp ");
                if (regs->cp0_status & ST0_IEP)
                        printk("IEp ");
                if (regs->cp0_status & ST0_KUC)
                        printk("KUc ");
                if (regs->cp0_status & ST0_IEC)
                        printk("IEc ");
        } else if (cpu_has_4kex) {
                if (regs->cp0_status & ST0_KX)
                        printk("KX ");
                if (regs->cp0_status & ST0_SX)
                        printk("SX ");
                if (regs->cp0_status & ST0_UX)
                        printk("UX ");
                switch (regs->cp0_status & ST0_KSU) {
                case KSU_USER:
                        printk("USER ");
                        break;
                case KSU_SUPERVISOR:
                        printk("SUPERVISOR ");
                        break;
                case KSU_KERNEL:
                        printk("KERNEL ");
                        break;
                default:
                        printk("BAD_MODE ");
                        break;
                }
                if (regs->cp0_status & ST0_ERL)
                        printk("ERL ");
                if (regs->cp0_status & ST0_EXL)
                        printk("EXL ");
                if (regs->cp0_status & ST0_IE)
                        printk("IE ");
        }
        printk("\n");

        printk("Cause : %08x\n", cause);

        cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
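        /* Only ExcCodes 1..5 (TLB modified/load/store, AdEL, AdES) latch BadVAddr. */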
        if (1 <= cause && cause <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId  : %08x (%s)\n", read_c0_prid(),
               cpu_name_string());
}

/*
 * FIXME: really the generic show_regs should take a const pointer argument.
 */
void show_regs(struct pt_regs *regs)
{
        __show_regs((struct pt_regs *)regs);
}

void show_registers(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);

        __show_regs(regs);
        print_modules();
        printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
               current->comm, current->pid, current_thread_info(), current,
               field, current_thread_info()->tp_value);
        if (cpu_has_userlocal) {
                unsigned long tls;

                tls = read_c0_userlocal();
                if (tls != current_thread_info()->tp_value)
                        printk("*HwTLS: %0*lx\n", field, tls);
        }

        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
}

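/* The ExcCode field occupies Cause bits 6..2; use it as the trap number. */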
static int regs_to_trapnr(struct pt_regs *regs)
{
        return (regs->cp0_cause >> 2) & 0x1f;
}

static DEFINE_RAW_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs)
{
        static int die_counter;
        int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */

        oops_enter();

        if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
                sig = 0;

        console_verbose();
        raw_spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
        dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
        bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
        mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */

        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        raw_spin_unlock_irq(&die_lock);

        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
                ssleep(5);
                panic("Fatal exception");
        }

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        do_exit(sig);
}

extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];

__asm__(
"       .section        __dbe_table, \"a\"\n"
"       .previous                       \n");

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
        if (!e)
                e = search_module_dbetables(addr);
        return e;
}

asmlinkage void do_be(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        /* XXX For now.  Fixme, this searches the wrong table ...  */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

        if (fixup)
                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != NULL);

        switch (action) {
        case MIPS_BE_DISCARD:
                goto out;
        case MIPS_BE_FIXUP:
                if (fixup) {
                        regs->cp0_epc = fixup->nextinsn;
                        goto out;
                }
                break;
        default:
                break;
        }

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
            == NOTIFY_STOP)
                goto out;

        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);

out:
        exception_exit(prev_state);
}

/*
 * ll/sc, rdhwr, sync emulation
 */

#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC0  0x00000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define SYNC   0x0000000f
#define RDHWR  0x0000003b

/*  microMIPS definitions   */
#define MM_POOL32A_FUNC 0xfc00ffff
#define MM_RDHWR        0x00006b3c
#define MM_RS           0x001f0000
#define MM_RT           0x03e00000

/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned int ll_bit;
struct task_struct *ll_task;

static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long value, __user *vaddr;
        long offset;

        /*
         * Analyse the ll instruction that just caused an RI exception
         * and compute the referenced address into vaddr.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;
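        /*
         * The shift-left/arithmetic-shift-right pair above replicates
         * bit 15 of the 16-bit immediate into the upper bits of the long.
         */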

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3)
                return SIGBUS;
        if (get_user(value, vaddr))
                return SIGSEGV;

        preempt_disable();

        if (ll_task == NULL || ll_task == current) {
                ll_bit = 1;
        } else {
                ll_bit = 0;
        }
        ll_task = current;

        preempt_enable();

        regs->regs[(opcode & RT) >> 16] = value;

        return 0;
}

static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long __user *vaddr;
        unsigned long reg;
        long offset;

        /*
         * Analyse the sc instruction that just caused an RI exception
         * and compute the referenced address into vaddr.
         */

        /* sign extend offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3)
                return SIGBUS;

        preempt_disable();

        if (ll_bit == 0 || ll_task != current) {
                regs->regs[reg] = 0;
                preempt_enable();
                return 0;
        }

        preempt_enable();

        if (put_user(regs->regs[reg], vaddr))
                return SIGSEGV;

        regs->regs[reg] = 1;

        return 0;
}

/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == LL) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return simulate_ll(regs, opcode);
        }
        if ((opcode & OPCODE) == SC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return simulate_sc(regs, opcode);
        }

        return -1;                      /* Must be something else ... */
}
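
/*
 * Illustrative only: the sort of userspace sequence this keeps working
 * on ll/sc-less CPUs is the classic atomic update loop, e.g.
 *
 *      1:      ll      t0, 0(a0)
 *              addiu   t0, t0, 1
 *              sc      t0, 0(a0)
 *              beqz    t0, 1b
 *
 * Each ll/sc traps here and is emulated through ll_bit/ll_task instead
 * of executing natively.
 */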

/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.
 */
static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
{
        struct thread_info *ti = task_thread_info(current);

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                        1, regs, 0);
        switch (rd) {
        case 0:         /* CPU number */
                regs->regs[rt] = smp_processor_id();
                return 0;
        case 1:         /* SYNCI length */
                regs->regs[rt] = min(current_cpu_data.dcache.linesz,
                                     current_cpu_data.icache.linesz);
                return 0;
        case 2:         /* Read count register */
                regs->regs[rt] = read_c0_count();
                return 0;
        case 3:         /* Count register resolution */
                switch (current_cpu_data.cputype) {
                case CPU_20KC:
                case CPU_25KF:
                        regs->regs[rt] = 1;
                        break;
                default:
                        regs->regs[rt] = 2;
                }
                return 0;
        case 29:
                regs->regs[rt] = ti->tp_value;
                return 0;
        default:
                return -1;
        }
}
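
/*
 * Illustrative only: the usual consumer of this emulation is the TLS
 * pointer read the toolchain emits, e.g.
 *
 *      rdhwr   v1, $29
 *
 * which on CPUs lacking RDHWR traps as a reserved instruction and is
 * satisfied from ti->tp_value in case 29 above.
 */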

static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;

                simulate_rdhwr(regs, rd, rt);
                return 0;
        }

        /* Not ours.  */
        return -1;
}

static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
                int rd = (opcode & MM_RS) >> 16;
                int rt = (opcode & MM_RT) >> 21;
                simulate_rdhwr(regs, rd, rt);
                return 0;
        }

        /* Not ours.  */
        return -1;
}

static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
                                1, regs, 0);
                return 0;
        }

        return -1;                      /* Must be something else ... */
}

asmlinkage void do_ov(struct pt_regs *regs)
{
        enum ctx_state prev_state;
        siginfo_t info;

        prev_state = exception_enter();
        die_if_kernel("Integer overflow", regs);

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
        exception_exit(prev_state);
}

int process_fpemu_return(int sig, void __user *fault_addr)
{
        if (sig == SIGSEGV || sig == SIGBUS) {
                struct siginfo si = {0};
                si.si_addr = fault_addr;
                si.si_signo = sig;
                if (sig == SIGSEGV) {
                        if (find_vma(current->mm, (unsigned long)fault_addr))
                                si.si_code = SEGV_ACCERR;
                        else
                                si.si_code = SEGV_MAPERR;
                } else {
                        si.si_code = BUS_ADRERR;
                }
                force_sig_info(sig, &si, current);
                return 1;
        } else if (sig) {
                force_sig(sig, current);
                return 1;
        } else {
                return 0;
        }
}

/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
        enum ctx_state prev_state;
        siginfo_t info = {0};

        prev_state = exception_enter();
        if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
            == NOTIFY_STOP)
                goto out;
        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
                int sig;
                void __user *fault_addr = NULL;

                /*
                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context.  We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */
                /* Ensure 'resume' does not overwrite the saved FP context again. */
                lose_fpu(1);

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                               &fault_addr);

                /*
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                 */
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */
                own_fpu(1);     /* Using the FPU again.  */

                /* If something went wrong, signal */
                process_fpemu_return(sig, fault_addr);

                goto out;
        } else if (fcr31 & FPU_CSR_INV_X)
                info.si_code = FPE_FLTINV;
        else if (fcr31 & FPU_CSR_DIV_X)
                info.si_code = FPE_FLTDIV;
        else if (fcr31 & FPU_CSR_OVF_X)
                info.si_code = FPE_FLTOVF;
        else if (fcr31 & FPU_CSR_UDF_X)
                info.si_code = FPE_FLTUND;
        else if (fcr31 & FPU_CSR_INE_X)
                info.si_code = FPE_FLTRES;
        else
                info.si_code = __SI_FAULT;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);

out:
        exception_exit(prev_state);
}

static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
{
        siginfo_t info;
        char b[40];

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */

        if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                return;

        /*
         * A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap and break codes that indicate arithmetic
         * failures.  Weird ...
         * But should we continue the brokenness???  --macro
         */
        switch (code) {
        case BRK_OVERFLOW:
        case BRK_DIVZERO:
                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
                die_if_kernel(b, regs);
                if (code == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        case BRK_BUG:
                die_if_kernel("Kernel bug detected", regs);
                force_sig(SIGTRAP, current);
                break;
        case BRK_MEMU:
                /*
                 * Address errors may be deliberately induced by the FPU
                 * emulator to retake control of the CPU after executing the
                 * instruction in the delay slot of an emulated branch.
                 *
                 * Terminate if the exception was recognized as a delay slot
                 * return; otherwise handle as normal.
                 */
                if (do_dsemulret(regs))
                        return;

                die_if_kernel("Math emu break/trap", regs);
                force_sig(SIGTRAP, current);
                break;
        default:
                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
                die_if_kernel(b, regs);
                force_sig(SIGTRAP, current);
        }
}

asmlinkage void do_bp(struct pt_regs *regs)
{
        unsigned int opcode, bcode;
        enum ctx_state prev_state;
        unsigned long epc;
        u16 instr[2];

        prev_state = exception_enter();
        if (get_isa16_mode(regs->cp0_epc)) {
                /* Calculate EPC. */
                epc = exception_epc(regs);
                if (cpu_has_mmips) {
                        if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
                            __get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))
                                goto out_sigsegv;
                        opcode = (instr[0] << 16) | instr[1];
                } else {
                        /* MIPS16e mode */
                        if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)))
                                goto out_sigsegv;
                        bcode = (instr[0] >> 6) & 0x3f;
                        do_trap_or_bp(regs, bcode, "Break");
                        goto out;
                }
        } else {
                if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                        goto out_sigsegv;
        }

        /*
         * There is an ancient bug in MIPS assemblers: the break code would
         * be placed starting at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic.  --macro
         */
        bcode = ((opcode >> 6) & ((1 << 20) - 1));
        if (bcode >= (1 << 10))
                bcode >>= 10;
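        /*
         * Codes written at bit 6 by a correct assembler stay below 1 << 10
         * in practice; larger values are assumed to come from the buggy
         * bit-16 placement and are shifted back down.
         */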

        /*
         * Notify the kprobe handlers if the instruction is likely to
         * pertain to them.
         */
        switch (bcode) {
        case BRK_KPROBE_BP:
                if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        case BRK_KPROBE_SSTEPBP:
                if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
                        goto out;
                else
                        break;
        default:
                break;
        }

        do_trap_or_bp(regs, bcode, "Break");

out:
        exception_exit(prev_state);
        return;

out_sigsegv:
        force_sig(SIGSEGV, current);
        goto out;
}

asmlinkage void do_tr(struct pt_regs *regs)
{
        u32 opcode, tcode = 0;
        enum ctx_state prev_state;
        u16 instr[2];
        unsigned long epc = msk_isa16_mode(exception_epc(regs));

        prev_state = exception_enter();
        if (get_isa16_mode(regs->cp0_epc)) {
                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
                    __get_user(instr[1], (u16 __user *)(epc + 2)))
                        goto out_sigsegv;
                opcode = (instr[0] << 16) | instr[1];
                /* Immediate versions don't provide a code.  */
                if (!(opcode & OPCODE))
                        tcode = (opcode >> 12) & ((1 << 4) - 1);
        } else {
                if (__get_user(opcode, (u32 __user *)epc))
                        goto out_sigsegv;
                /* Immediate versions don't provide a code.  */
                if (!(opcode & OPCODE))
                        tcode = (opcode >> 6) & ((1 << 10) - 1);
        }

        do_trap_or_bp(regs, tcode, "Trap");

out:
        exception_exit(prev_state);
        return;

out_sigsegv:
        force_sig(SIGSEGV, current);
        goto out;
}

asmlinkage void do_ri(struct pt_regs *regs)
{
        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
        unsigned long old_epc = regs->cp0_epc;
        unsigned long old31 = regs->regs[31];
        enum ctx_state prev_state;
        unsigned int opcode = 0;
        int status = -1;

        prev_state = exception_enter();
        if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
            == NOTIFY_STOP)
                goto out;

        die_if_kernel("Reserved instruction in kernel code", regs);

        if (unlikely(compute_return_epc(regs) < 0))
                goto out;

        if (get_isa16_mode(regs->cp0_epc)) {
                unsigned short mmop[2] = { 0 };

                if (unlikely(get_user(mmop[0], epc) < 0))
                        status = SIGSEGV;
                if (unlikely(get_user(mmop[1], epc) < 0))
                        status = SIGSEGV;
                opcode = (mmop[0] << 16) | mmop[1];

                if (status < 0)
                        status = simulate_rdhwr_mm(regs, opcode);
        } else {
                if (unlikely(get_user(opcode, epc) < 0))
                        status = SIGSEGV;

                if (!cpu_has_llsc && status < 0)
                        status = simulate_llsc(regs, opcode);

                if (status < 0)
                        status = simulate_rdhwr_normal(regs, opcode);

                if (status < 0)
                        status = simulate_sync(regs, opcode);
        }

        if (status < 0)
                status = SIGILL;

        if (unlikely(status > 0)) {
                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
                regs->regs[31] = old31;
                force_sig(status, current);
        }

out:
        exception_exit(prev_state);
}

/*
 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 * emulated more than some threshold number of instructions, force migration to
 * a "CPU" that has FP support.
 */
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
        if (mt_fpemul_threshold > 0 &&
             ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
                /*
                 * If there's no FPU present, or if the application has already
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
                if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
                        cpumask_t tmask;

                        current->thread.user_cpus_allowed
                                = current->cpus_allowed;
                        cpus_and(tmask, current->cpus_allowed,
                                mt_fpu_cpumask);
                        set_cpus_allowed_ptr(current, &tmask);
                        set_thread_flag(TIF_FPUBOUND);
                }
        }
#endif /* CONFIG_MIPS_MT_FPAFF */
}

/*
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(cu2_chain);

int __ref register_cu2_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&cu2_chain, nb);
}

int cu2_notifier_call_chain(unsigned long val, void *v)
{
        return raw_notifier_call_chain(&cu2_chain, val, v);
}

static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        void *data)
{
        struct pt_regs *regs = data;

        die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
                              "instruction", regs);
        force_sig(SIGILL, current);

        return NOTIFY_OK;
}
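
/*
 * Illustrative only: a platform that owns Coprocessor 2 would register a
 * callback roughly like this (hypothetical names):
 *
 *      static struct notifier_block my_cu2_nb = {
 *              .notifier_call = my_cu2_call,
 *      };
 *      register_cu2_notifier(&my_cu2_nb);
 *
 * A callback that fully handles the exception returns NOTIFY_STOP so the
 * chain never reaches the default SIGILL treatment above.
 */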

asmlinkage void do_cpu(struct pt_regs *regs)
{
        enum ctx_state prev_state;
        unsigned int __user *epc;
        unsigned long old_epc, old31;
        unsigned int opcode;
        unsigned int cpid;
        int status;
        unsigned long __maybe_unused flags;

        prev_state = exception_enter();
        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

        if (cpid != 2)
                die_if_kernel("do_cpu invoked from kernel context!", regs);

        switch (cpid) {
        case 0:
                epc = (unsigned int __user *)exception_epc(regs);
                old_epc = regs->cp0_epc;
                old31 = regs->regs[31];
                opcode = 0;
                status = -1;

                if (unlikely(compute_return_epc(regs) < 0))
                        goto out;

                if (get_isa16_mode(regs->cp0_epc)) {
                        unsigned short mmop[2] = { 0 };

                        if (unlikely(get_user(mmop[0], epc) < 0))
                                status = SIGSEGV;
                        if (unlikely(get_user(mmop[1], epc) < 0))
                                status = SIGSEGV;
                        opcode = (mmop[0] << 16) | mmop[1];

                        if (status < 0)
                                status = simulate_rdhwr_mm(regs, opcode);
                } else {
                        if (unlikely(get_user(opcode, epc) < 0))
                                status = SIGSEGV;

                        if (!cpu_has_llsc && status < 0)
                                status = simulate_llsc(regs, opcode);

                        if (status < 0)
                                status = simulate_rdhwr_normal(regs, opcode);
                }

                if (status < 0)
                        status = SIGILL;

                if (unlikely(status > 0)) {
                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
                        regs->regs[31] = old31;
                        force_sig(status, current);
                }

                goto out;

        case 3:
                /*
                 * Old (MIPS I and MIPS II) processors will set this code
                 * for COP1X opcode instructions that replaced the original
                 * COP3 space.  We don't limit COP1 space instructions in
                 * the emulator according to the CPU ISA, so we want to
                 * treat COP1X instructions consistently regardless of which
                 * code the CPU chose.  Therefore we redirect this trap to
                 * the FP emulator too.
                 *
                 * Then some newer FPU-less processors use this code
                 * erroneously too, so they are covered by this choice
                 * as well.
                 */
                if (raw_cpu_has_fpu)
                        break;
                /* Fall through.  */

        case 1:
                if (used_math())        /* Using the FPU again.  */
                        own_fpu(1);
                else {                  /* First time FPU user.  */
                        init_fpu();
                        set_used_math();
                }

                if (!raw_cpu_has_fpu) {
                        int sig;
                        void __user *fault_addr = NULL;
                        sig = fpu_emulator_cop1Handler(regs,
                                                       &current->thread.fpu,
                                                       0, &fault_addr);
                        if (!process_fpemu_return(sig, fault_addr))
                                mt_ase_fp_affinity();
                }

                goto out;

        case 2:
                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
                goto out;
        }

        force_sig(SIGILL, current);

out:
        exception_exit(prev_state);
}

asmlinkage void do_mdmx(struct pt_regs *regs)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        force_sig(SIGILL, current);
        exception_exit(prev_state);
}

/*
 * Called with interrupts disabled.
 */
asmlinkage void do_watch(struct pt_regs *regs)
{
        enum ctx_state prev_state;
        u32 cause;

        prev_state = exception_enter();
        /*
         * Clear the WP bit (bit 22) of the cause register so we don't
         * loop forever.
         */
        cause = read_c0_cause();
        cause &= ~(1 << 22);
        write_c0_cause(cause);

        /*
         * If the current thread has the watch registers loaded, save
         * their values and send SIGTRAP.  Otherwise another thread
         * left the registers set, clear them and continue.
         */
        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
                mips_read_watch_registers();
                local_irq_enable();
                force_sig(SIGTRAP, current);
        } else {
                mips_clear_watch_registers();
                local_irq_enable();
        }
        exception_exit(prev_state);
}

asmlinkage void do_mcheck(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;
        enum ctx_state prev_state;

        prev_state = exception_enter();
        show_regs(regs);

        if (multi_match) {
                printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
                printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
                printk("\n");
                dump_tlb_all();
        }

        show_code((unsigned int __user *) regs->cp0_epc);

        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timer)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
}

asmlinkage void do_mt(struct pt_regs *regs)
{
        int subcode;

        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                        >> VPECONTROL_EXCPT_SHIFT;
        switch (subcode) {
        case 0:
                printk(KERN_DEBUG "Thread Underflow\n");
                break;
        case 1:
                printk(KERN_DEBUG "Thread Overflow\n");
                break;
        case 2:
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                break;
        case 3:
                printk(KERN_DEBUG "Gating Storage Exception\n");
                break;
        case 4:
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
                        subcode);
                break;
        }
        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL, current);
}

asmlinkage void do_dsp(struct pt_regs *regs)
{
        if (cpu_has_dsp)
                panic("Unexpected DSP exception");

        force_sig(SIGILL, current);
}

asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs.  Most
         * probably caused by a new unknown CPU type or by another deadly
         * hardware/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
}

static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
        l1parity = 0;
        return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
        l2parity = 0;
        return 1;
}
__setup("nol2par", nol2parity);
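
/*
 * Booting with "nol1par" or "nol2par" on the kernel command line leaves
 * the corresponding parity checking disabled when
 * parity_protection_init() below programs ErrCtl.
 */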

/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
        switch (current_cpu_type()) {
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
        case CPU_1004K:
                {
#define ERRCTL_PE       0x80000000
#define ERRCTL_L2P      0x00800000
                        unsigned long errctl;
                        unsigned int l1parity_present, l2parity_present;

                        errctl = read_c0_ecc();
                        errctl &= ~(ERRCTL_PE|ERRCTL_L2P);

                        /* probe L1 parity support */
                        write_c0_ecc(errctl | ERRCTL_PE);
                        back_to_back_c0_hazard();
                        l1parity_present = (read_c0_ecc() & ERRCTL_PE);

                        /* probe L2 parity support */
                        write_c0_ecc(errctl|ERRCTL_L2P);
                        back_to_back_c0_hazard();
                        l2parity_present = (read_c0_ecc() & ERRCTL_L2P);

                        if (l1parity_present && l2parity_present) {
                                if (l1parity)
                                        errctl |= ERRCTL_PE;
                                if (l1parity ^ l2parity)
                                        errctl |= ERRCTL_L2P;
                        } else if (l1parity_present) {
                                if (l1parity)
                                        errctl |= ERRCTL_PE;
                        } else if (l2parity_present) {
                                if (l2parity)
                                        errctl |= ERRCTL_L2P;
                        } else {
                                /* No parity available */
                        }

                        printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);

                        write_c0_ecc(errctl);
                        back_to_back_c0_hazard();
                        errctl = read_c0_ecc();
                        printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);

                        if (l1parity_present)
                                printk(KERN_INFO "Cache parity protection %sabled\n",
                                       (errctl & ERRCTL_PE) ? "en" : "dis");

                        if (l2parity_present) {
                                if (l1parity_present && l1parity)
                                        errctl ^= ERRCTL_L2P;
                                printk(KERN_INFO "L2 cache parity protection %sabled\n",
                                       (errctl & ERRCTL_L2P) ? "en" : "dis");
                        }
                }
                break;

        case CPU_5KC:
        case CPU_5KE:
        case CPU_LOONGSON1:
                /* Set the PE bit (bit 31) in the c0_errctl register. */
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
                break;
        case CPU_20KC:
        case CPU_25KF:
                /* Clear the DE bit (bit 16) in the c0_status register. */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
                break;
        default:
                break;
        }
}

asmlinkage void cache_parity_error(void)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

        panic("Can't handle the cache error!");
}

/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc, old_ra;
        unsigned int debug;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC). EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                old_ra = regs->regs[31];
                regs->cp0_epc = depc;
                compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
                regs->regs[31] = old_ra;
        } else
                depc += 4;
        write_c0_depc(depc);

#if 0
        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
#endif
}

/*
 * NMI exception handler.
 * No lock; only written during early bootup by CPU 0.
 */
static RAW_NOTIFIER_HEAD(nmi_chain);

int register_nmi_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&nmi_chain, nb);
}

void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
        raw_notifier_call_chain(&nmi_chain, 0, regs);
        bust_spinlocks(1);
        printk("NMI taken!!!!\n");
        die("NMI", regs);
}

#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];

void __init *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler;

#ifdef CONFIG_CPU_MICROMIPS
        /*
         * Only the TLB handlers are cache aligned with an even
         * address. All other handlers are on an odd address and
         * require no modification. Otherwise, MIPS32 mode will
         * be entered when handling any TLB exceptions. That
         * would be bad...since we must stay in microMIPS mode.
         */
        if (!(handler & 0x1))
                handler |= 1;
#endif
        old_handler = xchg(&exception_handlers[n], handler);

        if (n == 0 && cpu_has_divec) {
#ifdef CONFIG_CPU_MICROMIPS
                unsigned long jump_mask = ~((1 << 27) - 1);
#else
                unsigned long jump_mask = ~((1 << 28) - 1);
#endif
                u32 *buf = (u32 *)(ebase + 0x200);
                unsigned int k0 = 26;
                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
                        uasm_i_j(&buf, handler & ~jump_mask);
                        uasm_i_nop(&buf);
                } else {
                        UASM_i_LA(&buf, k0, handler);
                        uasm_i_jr(&buf, k0);
                        uasm_i_nop(&buf);
                }
                local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
        }
        return (void *)old_handler;
}
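
/*
 * Illustrative only: trap_init() (not part of this excerpt) installs the
 * asmlinkage handlers declared at the top of this file with calls such as
 *
 *      set_except_vector(0, handle_int);
 *      set_except_vector(13, handle_tr);
 *
 * and may use the returned pointer to restore a previous handler.
 */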

static void do_default_vi(void)
{
        show_regs(get_irq_regs());
        panic("Caught unexpected vectored interrupt.");
}

static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        int srssets = current_cpu_data.srsets;
        u16 *h;
        unsigned char *b;

        BUG_ON(!cpu_has_veic && !cpu_has_vint);
        BUG_ON((n < 0) || (n > 9));

        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
        vi_handlers[n] = handler;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

        if (srs >= srssets)
                panic("Shadow register set %d not supported", srs);

        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (srssets > 1)
                        change_c0_srsmap(0xf << n*4, srs << n*4);
        }

        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
                 * that does normal register saving and standard interrupt exit
                 */
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
                extern char rollback_except_vec_vi;
                char *vec_start = using_rollback_handler() ?
                        &rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * We need to provide the SMTC vectored interrupt handler
                 * not only with the address of the handler, but with the
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
                const int mori_offset = &except_vec_vi_mori - vec_start + 2;
#else
                const int mori_offset = &except_vec_vi_mori - vec_start;
#endif
#endif /* CONFIG_MIPS_MT_SMTC */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
                const int lui_offset = &except_vec_vi_lui - vec_start + 2;
                const int ori_offset = &except_vec_vi_ori - vec_start + 2;
#else
                const int lui_offset = &except_vec_vi_lui - vec_start;
                const int ori_offset = &except_vec_vi_ori - vec_start;
#endif
                const int handler_len = &except_vec_vi_end - vec_start;

                if (handler_len > VECTORSPACING) {
1612                        /*
1613                         * Sigh... panicking won't help as the console
1614                         * is probably not configured :(
1615                         */
1616                        panic("VECTORSPACING too small");
1617                }
1618
1619                set_handler(((unsigned long)b - ebase), vec_start,
1620#ifdef CONFIG_CPU_MICROMIPS
1621                                (handler_len - 1));
1622#else
1623                                handler_len);
1624#endif
1625#ifdef CONFIG_MIPS_MT_SMTC
1626                BUG_ON(n > 7);  /* Vector index exceeds SMTC maximum. */
1627
1628                h = (u16 *)(b + mori_offset);
1629                *h = (0x100 << n);
1630#endif /* CONFIG_MIPS_MT_SMTC */
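                /*
                 * The stub copied above loads the handler address with
                 * a lui/ori pair; patch the two 16-bit immediates with
                 * the high and low halves of the handler address.
                 */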
1631                h = (u16 *)(b + lui_offset);
1632                *h = (handler >> 16) & 0xffff;
1633                h = (u16 *)(b + ori_offset);
1634                *h = (handler & 0xffff);
1635                local_flush_icache_range((unsigned long)b,
1636                                         (unsigned long)(b+handler_len));
1637        } else {
1639                /*
1640                 * In other cases jump directly to the interrupt handler. It
1641                 * is the handler's responsibility to save registers if required
1642                 * (e.g. hi/lo) and return from the exception using "eret".
1643                 */
1644                u32 insn;
1645
1646                h = (u16 *)b;
1647                /* j handler */
1648#ifdef CONFIG_CPU_MICROMIPS
1649                insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
1650#else
1651                insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
1652#endif
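                /*
                 * 0x08000000 is the MIPS32 "j" opcode (26-bit word
                 * index); 0xd4000000 is the microMIPS equivalent
                 * (26-bit halfword index).  The instruction is stored
                 * as two halfwords, most-significant first, with a
                 * zero-filled delay slot (a nop in MIPS32 encoding),
                 * then flushed so the CPU fetches the new code.
                 */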
1653                h[0] = (insn >> 16) & 0xffff;
1654                h[1] = insn & 0xffff;
1655                h[2] = 0;
1656                h[3] = 0;
1657                local_flush_icache_range((unsigned long)b,
1658                                         (unsigned long)(b+8));
1659        }
1660
1661        return (void *)old_handler;
1662}
1663
1664void *set_vi_handler(int n, vi_handler_t addr)
1665{
1666        return set_vi_srs_handler(n, addr, 0);
1667}
1668
1669extern void tlb_init(void);
1670
1671/*
1672 * Timer interrupt
1673 */
1674int cp0_compare_irq;
1675EXPORT_SYMBOL_GPL(cp0_compare_irq);
1676int cp0_compare_irq_shift;
1677
1678/*
1679 * Performance counter IRQ or -1 if shared with timer
1680 */
1681int cp0_perfcount_irq;
1682EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1683
1684static int noulri;
1685
1686static int __init ulri_disable(char *s)
1687{
1688        pr_info("Disabling ulri\n");
1689        noulri = 1;
1690
1691        return 1;
1692}
1693__setup("noulri", ulri_disable);
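
    /*
     * "noulri" on the kernel command line keeps HWREna.ULR (bit 29)
     * clear in per_cpu_trap_init() below, so a userland RDHWR of the
     * UserLocal (TLS) register is not satisfied in hardware and traps
     * for emulation instead.
     */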
1694
1695void per_cpu_trap_init(bool is_boot_cpu)
1696{
1697        unsigned int cpu = smp_processor_id();
1698        unsigned int status_set = ST0_CU0;
1699        unsigned int hwrena = cpu_hwrena_impl_bits;
1700#ifdef CONFIG_MIPS_MT_SMTC
1701        int secondaryTC = 0;
1702        int bootTC = (cpu == 0);
1703
1704        /*
1705         * Only do per_cpu_trap_init() for the first TC of each VPE.
1706         * Note that this hack assumes that the SMTC init code
1707         * assigns TCs consecutively and in ascending order.
1708         */
1709
1710        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1711            ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1712                secondaryTC = 1;
1713#endif /* CONFIG_MIPS_MT_SMTC */
1714
1715        /*
1716         * Disable coprocessors and select 32-bit or 64-bit addressing
1717         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
1718         * flag that some firmware may have left set and the TS bit (for
1719         * IP27).  Set XX for ISA IV code to work.
1720         */
1721#ifdef CONFIG_64BIT
1722        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1723#endif
1724        if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
1725                status_set |= ST0_XX;
1726        if (cpu_has_dsp)
1727                status_set |= ST0_MX;
1728
1729        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1730                         status_set);
1731
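        /*
         * HWREna gates which hardware registers a user-mode RDHWR may
         * read: bits 0-3 expose CPUNum, SYNCI_Step, CC and CCRes on R2
         * cores, and bit 29 (ULR) exposes the UserLocal/TLS register.
         */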
1732        if (cpu_has_mips_r2)
1733                hwrena |= 0x0000000f;
1734
1735        if (!noulri && cpu_has_userlocal)
1736                hwrena |= (1 << 29);
1737
1738        if (hwrena)
1739                write_c0_hwrena(hwrena);
1740
1741#ifdef CONFIG_MIPS_MT_SMTC
1742        if (!secondaryTC) {
1743#endif /* CONFIG_MIPS_MT_SMTC */
1744
1745        if (cpu_has_veic || cpu_has_vint) {
1746                unsigned long sr = set_c0_status(ST0_BEV);
1747                write_c0_ebase(ebase);
1748                write_c0_status(sr);
1749                /*
                     * Setting a non-zero vector spacing enables EI/VI
                     * mode.  0x3e0 masks the IntCtl.VS field (bits 9:5),
                     * which is positioned so that writing the desired
                     * byte spacing (0x100) directly yields the encoded
                     * value VS = 8.
                     */
1750                change_c0_intctl(0x3e0, VECTORSPACING);
1751        }
1752        if (cpu_has_divec) {
1753                if (cpu_has_mipsmt) {
1754                        unsigned int vpflags = dvpe();
1755                        set_c0_cause(CAUSEF_IV);
1756                        evpe(vpflags);
1757                } else
1758                        set_c0_cause(CAUSEF_IV);
1759        }
1760
1761        /*
1762         * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
1763         *
1764         *  o read IntCtl.IPTI to determine the timer interrupt
1765         *  o read IntCtl.IPPCI to determine the performance counter interrupt
1766         */
1767        if (cpu_has_mips_r2) {
1768                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
1769                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
1770                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
1771                if (cp0_perfcount_irq == cp0_compare_irq)
1772                        cp0_perfcount_irq = -1;
1773        } else {
1774                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
1775                cp0_compare_irq_shift = CP0_LEGACY_COMPARE_IRQ;
1776                cp0_perfcount_irq = -1;
1777        }
1778
1779#ifdef CONFIG_MIPS_MT_SMTC
1780        }
1781#endif /* CONFIG_MIPS_MT_SMTC */
1782
1783        if (!cpu_data[cpu].asid_cache)
1784                cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1785
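        /*
         * The early bootstrap/idle thread has no user mm; pin init_mm
         * and borrow it as the active mm so the MMU context code
         * always has an address space to work with.
         */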
1786        atomic_inc(&init_mm.mm_count);
1787        current->active_mm = &init_mm;
1788        BUG_ON(current->mm);
1789        enter_lazy_tlb(&init_mm, current);
1790
1791#ifdef CONFIG_MIPS_MT_SMTC
1792        if (bootTC) {
1793#endif /* CONFIG_MIPS_MT_SMTC */
1794                /* Boot CPU's cache setup in setup_arch(). */
1795                if (!is_boot_cpu)
1796                        cpu_cache_init();
1797                tlb_init();
1798#ifdef CONFIG_MIPS_MT_SMTC
1799        } else if (!secondaryTC) {
1800                /*
1801                 * First TC in non-boot VPE must do subset of tlb_init()
1802                 * for MMU control registers.
1803                 */
1804                write_c0_pagemask(PM_DEFAULT_MASK);
1805                write_c0_wired(0);
1806        }
1807#endif /* CONFIG_MIPS_MT_SMTC */
1808        TLBMISS_HANDLER_SETUP();
1809}
1810
1811/* Install CPU exception handler */
1812void set_handler(unsigned long offset, void *addr, unsigned long size)
1813{
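        /*
         * microMIPS handler symbols carry the ISA bit (bit 0) set;
         * strip it to get the real start address of the code to copy.
         */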
1814#ifdef CONFIG_CPU_MICROMIPS
1815        memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
1816#else
1817        memcpy((void *)(ebase + offset), addr, size);
1818#endif
1819        local_flush_icache_range(ebase + offset, ebase + offset + size);
1820}
1821
1822static char panic_null_cerr[] =
1823        "Trying to set NULL cache error exception handler";
1824
1825/*
1826 * Install uncached CPU exception handler.
1827 * This is suitable only for the cache error exception which is the only
1828 * exception handler that is being run uncached.
1829 */
1830void set_uncached_handler(unsigned long offset, void *addr,
1831        unsigned long size)
1832{
1833        unsigned long uncached_ebase = CKSEG1ADDR(ebase);
1834
1835        if (!addr)
1836                panic(panic_null_cerr);
1837
1838        memcpy((void *)(uncached_ebase + offset), addr, size);
1839}
1840
1841static int __initdata rdhwr_noopt;
1842static int __init set_rdhwr_noopt(char *str)
1843{
1844        rdhwr_noopt = 1;
1845        return 1;
1846}
1847
1848__setup("rdhwr_noopt", set_rdhwr_noopt);
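
    /*
     * Booting with "rdhwr_noopt" makes trap_init() below install the
     * plain RI handler at vector 10 instead of the handle_ri_rdhwr
     * variants, which fast-path emulation of userland RDHWR.
     */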
1849
1850void __init trap_init(void)
1851{
1852        extern char except_vec3_generic;
1853        extern char except_vec4;
1854        extern char except_vec3_r4000;
1855        unsigned long i;
1856
1857        check_wait();
1858
1859#if defined(CONFIG_KGDB)
1860        if (kgdb_early_setup)
1861                return; /* Already done */
1862#endif
1863
1864        if (cpu_has_veic || cpu_has_vint) {
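                /*
                 * Reserve the 0x200 general exception area plus 64
                 * vector slots, aligned up to the next power of two
                 * above the size so the block satisfies EBase's
                 * alignment requirements.
                 */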
1865                unsigned long size = 0x200 + VECTORSPACING*64;
1866                ebase = (unsigned long)
1867                        __alloc_bootmem(size, 1 << fls(size), 0);
1868        } else {
1869#ifdef CONFIG_KVM_GUEST
1870#define KVM_GUEST_KSEG0     0x40000000
1871                ebase = KVM_GUEST_KSEG0;
1872#else
1873                ebase = CKSEG0;
1874#endif
1875                if (cpu_has_mips_r2)
1876                        ebase += (read_c0_ebase() & 0x3ffff000);
1877        }
1878
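        /*
         * Config3.ISAOnExc selects which ISA mode (classic vs.
         * microMIPS) the CPU enters when taking an exception; make it
         * match the mode the kernel's handlers are built in.
         */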
1879        if (cpu_has_mmips) {
1880                unsigned int config3 = read_c0_config3();
1881
1882                if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
1883                        write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
1884                else
1885                        write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
1886        }
1887
1888        if (board_ebase_setup)
1889                board_ebase_setup();
1890        per_cpu_trap_init(true);
1891
1892        /*
1893         * Copy the generic exception handlers to their final destination.
1894         * This will be overridden later as suitable for a particular
1895         * configuration.
1896         */
1897        set_handler(0x180, &except_vec3_generic, 0x80);
1898
1899        /*
1900         * Set up default vectors
1901         */
1902        for (i = 0; i <= 31; i++)
1903                set_except_vector(i, handle_reserved);
1904
1905        /*
1906         * Copy the EJTAG debug exception vector handler code to its final
1907         * destination.
1908         */
1909        if (cpu_has_ejtag && board_ejtag_handler_setup)
1910                board_ejtag_handler_setup();
1911
1912        /*
1913         * Only some CPUs have the watch exceptions.
1914         */
1915        if (cpu_has_watch)
1916                set_except_vector(23, handle_watch);
1917
1918        /*
1919         * Initialise interrupt handlers
1920         */
1921        if (cpu_has_veic || cpu_has_vint) {
1922                int nvec = cpu_has_veic ? 64 : 8;
1923                for (i = 0; i < nvec; i++)
1924                        set_vi_handler(i, NULL);
1925        } else if (cpu_has_divec)
1927                set_handler(0x200, &except_vec4, 0x8);
1928
1929        /*
1930         * Some CPUs can enable/disable cache parity detection, but do
1931         * it in different ways.
1932         */
1933        parity_protection_init();
1934
1935        /*
1936         * The Data Bus Errors / Instruction Bus Errors are signaled
1937         * by external hardware.  Therefore these two exceptions
1938         * may have board specific handlers.
1939         */
1940        if (board_be_init)
1941                board_be_init();
1942
1943        set_except_vector(0, using_rollback_handler() ? rollback_handle_int
1944                                                      : handle_int);
1945        set_except_vector(1, handle_tlbm);
1946        set_except_vector(2, handle_tlbl);
1947        set_except_vector(3, handle_tlbs);
1948
1949        set_except_vector(4, handle_adel);
1950        set_except_vector(5, handle_ades);
1951
1952        set_except_vector(6, handle_ibe);
1953        set_except_vector(7, handle_dbe);
1954
1955        set_except_vector(8, handle_sys);
1956        set_except_vector(9, handle_bp);
1957        set_except_vector(10, rdhwr_noopt ? handle_ri :
1958                          (cpu_has_vtag_icache ?
1959                           handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1960        set_except_vector(11, handle_cpu);
1961        set_except_vector(12, handle_ov);
1962        set_except_vector(13, handle_tr);
1963
1964        if (current_cpu_type() == CPU_R6000 ||
1965            current_cpu_type() == CPU_R6000A) {
1966                /*
1967                 * The R6000 is the only R-series CPU that features a machine
1968                 * check exception (similar to the R4000 cache error) and
1969                 * unaligned ldc1/sdc1 exception.  The handlers have not been
1970                 * written yet.  Well, anyway there is no R6000 machine on the
1971                 * current list of targets for Linux/MIPS.
1972                 * (Duh, crap, there is someone with a triple R6k machine)
1973                 */
1974                //set_except_vector(14, handle_mc);
1975                //set_except_vector(15, handle_ndc);
1976        }
1977
1978
1979        if (board_nmi_handler_setup)
1980                board_nmi_handler_setup();
1981
1982        if (cpu_has_fpu && !cpu_has_nofpuex)
1983                set_except_vector(15, handle_fpe);
1984
1985        set_except_vector(22, handle_mdmx);
1986
1987        if (cpu_has_mcheck)
1988                set_except_vector(24, handle_mcheck);
1989
1990        if (cpu_has_mipsmt)
1991                set_except_vector(25, handle_mt);
1992
1993        set_except_vector(26, handle_dsp);
1994
1995        if (board_cache_error_setup)
1996                board_cache_error_setup();
1997
1998        if (cpu_has_vce)
1999                /* Special exception: R4[04]00 uses also the divec space. */
2000                set_handler(0x180, &except_vec3_r4000, 0x100);
2001        else if (cpu_has_4kex)
2002                set_handler(0x180, &except_vec3_generic, 0x80);
2003        else
2004                set_handler(0x080, &except_vec3_generic, 0x80);
2005
2006        local_flush_icache_range(ebase, ebase + 0x400);
2007
2008        sort_extable(__start___dbe_table, __stop___dbe_table);
2009
2010        cu2_notifier(default_cu2_call, 0x80000000);     /* Run last  */
2011}
2012