linux/arch/mips/kernel/traps.c
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/mm.h>
  25#include <linux/sched.h>
  26#include <linux/smp.h>
  27#include <linux/spinlock.h>
  28#include <linux/kallsyms.h>
  29#include <linux/bootmem.h>
  30#include <linux/interrupt.h>
  31#include <linux/ptrace.h>
  32#include <linux/kgdb.h>
  33#include <linux/kdebug.h>
  34#include <linux/kprobes.h>
  35#include <linux/notifier.h>
  36#include <linux/kdb.h>
  37#include <linux/irq.h>
  38#include <linux/perf_event.h>
  39
  40#include <asm/bootinfo.h>
  41#include <asm/branch.h>
  42#include <asm/break.h>
  43#include <asm/cop2.h>
  44#include <asm/cpu.h>
  45#include <asm/cpu-type.h>
  46#include <asm/dsp.h>
  47#include <asm/fpu.h>
  48#include <asm/fpu_emulator.h>
  49#include <asm/idle.h>
  50#include <asm/mips-r2-to-r6-emul.h>
  51#include <asm/mipsregs.h>
  52#include <asm/mipsmtregs.h>
  53#include <asm/module.h>
  54#include <asm/msa.h>
  55#include <asm/pgtable.h>
  56#include <asm/ptrace.h>
  57#include <asm/sections.h>
  58#include <asm/tlbdebug.h>
  59#include <asm/traps.h>
  60#include <asm/uaccess.h>
  61#include <asm/watch.h>
  62#include <asm/mmu_context.h>
  63#include <asm/types.h>
  64#include <asm/stacktrace.h>
  65#include <asm/uasm.h>
  66
  67extern void check_wait(void);
  68extern asmlinkage void rollback_handle_int(void);
  69extern asmlinkage void handle_int(void);
  70extern u32 handle_tlbl[];
  71extern u32 handle_tlbs[];
  72extern u32 handle_tlbm[];
  73extern asmlinkage void handle_adel(void);
  74extern asmlinkage void handle_ades(void);
  75extern asmlinkage void handle_ibe(void);
  76extern asmlinkage void handle_dbe(void);
  77extern asmlinkage void handle_sys(void);
  78extern asmlinkage void handle_bp(void);
  79extern asmlinkage void handle_ri(void);
  80extern asmlinkage void handle_ri_rdhwr_vivt(void);
  81extern asmlinkage void handle_ri_rdhwr(void);
  82extern asmlinkage void handle_cpu(void);
  83extern asmlinkage void handle_ov(void);
  84extern asmlinkage void handle_tr(void);
  85extern asmlinkage void handle_msa_fpe(void);
  86extern asmlinkage void handle_fpe(void);
  87extern asmlinkage void handle_ftlb(void);
  88extern asmlinkage void handle_msa(void);
  89extern asmlinkage void handle_mdmx(void);
  90extern asmlinkage void handle_watch(void);
  91extern asmlinkage void handle_mt(void);
  92extern asmlinkage void handle_dsp(void);
  93extern asmlinkage void handle_mcheck(void);
  94extern asmlinkage void handle_reserved(void);
  95extern void tlb_do_page_fault_0(void);
  96
  97void (*board_be_init)(void);
  98int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
  99void (*board_nmi_handler_setup)(void);
 100void (*board_ejtag_handler_setup)(void);
 101void (*board_bind_eic_interrupt)(int irq, int regset);
 102void (*board_ebase_setup)(void);
 103void(*board_cache_error_setup)(void);
 104
 105static void show_raw_backtrace(unsigned long reg29)
 106{
 107        unsigned long *sp = (unsigned long *)(reg29 & ~3);
 108        unsigned long addr;
 109
 110        printk("Call Trace:");
 111#ifdef CONFIG_KALLSYMS
 112        printk("\n");
 113#endif
 114        while (!kstack_end(sp)) {
 115                unsigned long __user *p =
 116                        (unsigned long __user *)(unsigned long)sp++;
 117                if (__get_user(addr, p)) {
 118                        printk(" (Bad stack address)");
 119                        break;
 120                }
 121                if (__kernel_text_address(addr))
 122                        print_ip_sym(addr);
 123        }
 124        printk("\n");
 125}
 126
 127#ifdef CONFIG_KALLSYMS
 128int raw_show_trace;
 129static int __init set_raw_show_trace(char *str)
 130{
 131        raw_show_trace = 1;
 132        return 1;
 133}
 134__setup("raw_show_trace", set_raw_show_trace);
 135#endif
 136
 137static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 138{
 139        unsigned long sp = regs->regs[29];
 140        unsigned long ra = regs->regs[31];
 141        unsigned long pc = regs->cp0_epc;
 142
 143        if (!task)
 144                task = current;
 145
 146        if (raw_show_trace || !__kernel_text_address(pc)) {
 147                show_raw_backtrace(sp);
 148                return;
 149        }
 150        printk("Call Trace:\n");
 151        do {
 152                print_ip_sym(pc);
 153                pc = unwind_stack(task, &sp, pc, &ra);
 154        } while (pc);
 155        printk("\n");
 156}
 157
 158/*
 159 * This routine abuses get_user()/put_user() to reference pointers
 160 * with at least a bit of error checking ...
 161 */
 162static void show_stacktrace(struct task_struct *task,
 163        const struct pt_regs *regs)
 164{
 165        const int field = 2 * sizeof(unsigned long);
 166        long stackdata;
 167        int i;
 168        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 169
 170        printk("Stack :");
 171        i = 0;
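             /*
              * Dump at most 40 words of stack, (64 / field) words per line
              * (8 on 32-bit, 4 on 64-bit kernels), stopping early at a page
              * boundary or on a faulting access.
              */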
 172        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 173                if (i && ((i % (64 / field)) == 0))
 174                        printk("\n       ");
 175                if (i > 39) {
 176                        printk(" ...");
 177                        break;
 178                }
 179
 180                if (__get_user(stackdata, sp++)) {
 181                        printk(" (Bad stack address)");
 182                        break;
 183                }
 184
 185                printk(" %0*lx", field, stackdata);
 186                i++;
 187        }
 188        printk("\n");
 189        show_backtrace(task, regs);
 190}
 191
 192void show_stack(struct task_struct *task, unsigned long *sp)
 193{
 194        struct pt_regs regs;
 195        mm_segment_t old_fs = get_fs();
 196        if (sp) {
 197                regs.regs[29] = (unsigned long)sp;
 198                regs.regs[31] = 0;
 199                regs.cp0_epc = 0;
 200        } else {
 201                if (task && task != current) {
 202                        regs.regs[29] = task->thread.reg29;
 203                        regs.regs[31] = 0;
 204                        regs.cp0_epc = task->thread.reg31;
 205#ifdef CONFIG_KGDB_KDB
 206                } else if (atomic_read(&kgdb_active) != -1 &&
 207                           kdb_current_regs) {
 208                        memcpy(&regs, kdb_current_regs, sizeof(regs));
 209#endif /* CONFIG_KGDB_KDB */
 210                } else {
 211                        prepare_frametrace(&regs);
 212                }
 213        }
 214        /*
 215         * show_stack() deals exclusively with kernel mode, so be sure to access
 216         * the stack in the kernel (not user) address space.
 217         */
 218        set_fs(KERNEL_DS);
 219        show_stacktrace(task, &regs);
 220        set_fs(old_fs);
 221}
 222
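     /*
      * Dump the code around the faulting address: three instruction words
      * before EPC and five after it, with the word at EPC bracketed by '<'
      * and '>'.  If EPC has the ISA bit set (MIPS16e/microMIPS), 16-bit
      * halfwords are shown instead of 32-bit words.
      */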
 223static void show_code(unsigned int __user *pc)
 224{
 225        long i;
 226        unsigned short __user *pc16 = NULL;
 227
 228        printk("\nCode:");
 229
 230        if ((unsigned long)pc & 1)
 231                pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 232        for(i = -3 ; i < 6 ; i++) {
 233                unsigned int insn;
 234                if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 235                        printk(" (Bad address in epc)\n");
 236                        break;
 237                }
 238                printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 239        }
 240}
 241
 242static void __show_regs(const struct pt_regs *regs)
 243{
 244        const int field = 2 * sizeof(unsigned long);
 245        unsigned int cause = regs->cp0_cause;
 246        unsigned int exccode;
 247        int i;
 248
 249        show_regs_print_info(KERN_DEFAULT);
 250
 251        /*
 252         * Saved main processor registers
 253         */
 254        for (i = 0; i < 32; ) {
 255                if ((i % 4) == 0)
 256                        printk("$%2d   :", i);
 257                if (i == 0)
 258                        printk(" %0*lx", field, 0UL);
 259                else if (i == 26 || i == 27)
 260                        printk(" %*s", field, "");
 261                else
 262                        printk(" %0*lx", field, regs->regs[i]);
 263
 264                i++;
 265                if ((i % 4) == 0)
 266                        printk("\n");
 267        }
 268
 269#ifdef CONFIG_CPU_HAS_SMARTMIPS
 270        printk("Acx    : %0*lx\n", field, regs->acx);
 271#endif
 272        printk("Hi    : %0*lx\n", field, regs->hi);
 273        printk("Lo    : %0*lx\n", field, regs->lo);
 274
 275        /*
 276         * Saved cp0 registers
 277         */
 278        printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 279               (void *) regs->cp0_epc);
 280        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 281               (void *) regs->regs[31]);
 282
 283        printk("Status: %08x    ", (uint32_t) regs->cp0_status);
 284
 285        if (cpu_has_3kex) {
 286                if (regs->cp0_status & ST0_KUO)
 287                        printk("KUo ");
 288                if (regs->cp0_status & ST0_IEO)
 289                        printk("IEo ");
 290                if (regs->cp0_status & ST0_KUP)
 291                        printk("KUp ");
 292                if (regs->cp0_status & ST0_IEP)
 293                        printk("IEp ");
 294                if (regs->cp0_status & ST0_KUC)
 295                        printk("KUc ");
 296                if (regs->cp0_status & ST0_IEC)
 297                        printk("IEc ");
 298        } else if (cpu_has_4kex) {
 299                if (regs->cp0_status & ST0_KX)
 300                        printk("KX ");
 301                if (regs->cp0_status & ST0_SX)
 302                        printk("SX ");
 303                if (regs->cp0_status & ST0_UX)
 304                        printk("UX ");
 305                switch (regs->cp0_status & ST0_KSU) {
 306                case KSU_USER:
 307                        printk("USER ");
 308                        break;
 309                case KSU_SUPERVISOR:
 310                        printk("SUPERVISOR ");
 311                        break;
 312                case KSU_KERNEL:
 313                        printk("KERNEL ");
 314                        break;
 315                default:
 316                        printk("BAD_MODE ");
 317                        break;
 318                }
 319                if (regs->cp0_status & ST0_ERL)
 320                        printk("ERL ");
 321                if (regs->cp0_status & ST0_EXL)
 322                        printk("EXL ");
 323                if (regs->cp0_status & ST0_IE)
 324                        printk("IE ");
 325        }
 326        printk("\n");
 327
 328        exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 329        printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 330
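             /*
              * ExcCode values 1..5 (Mod, TLBL, TLBS, AdEL, AdES) are the
              * exceptions that load BadVAddr, so only show it for those.
              */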
 331        if (1 <= exccode && exccode <= 5)
 332                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 333
 334        printk("PrId  : %08x (%s)\n", read_c0_prid(),
 335               cpu_name_string());
 336}
 337
 338/*
 339 * FIXME: really the generic show_regs should take a const pointer argument.
 340 */
 341void show_regs(struct pt_regs *regs)
 342{
 343        __show_regs((struct pt_regs *)regs);
 344}
 345
 346void show_registers(struct pt_regs *regs)
 347{
 348        const int field = 2 * sizeof(unsigned long);
 349        mm_segment_t old_fs = get_fs();
 350
 351        __show_regs(regs);
 352        print_modules();
 353        printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 354               current->comm, current->pid, current_thread_info(), current,
 355              field, current_thread_info()->tp_value);
 356        if (cpu_has_userlocal) {
 357                unsigned long tls;
 358
 359                tls = read_c0_userlocal();
 360                if (tls != current_thread_info()->tp_value)
 361                        printk("*HwTLS: %0*lx\n", field, tls);
 362        }
 363
 364        if (!user_mode(regs))
 365                /* Necessary for getting the correct stack content */
 366                set_fs(KERNEL_DS);
 367        show_stacktrace(current, regs);
 368        show_code((unsigned int __user *) regs->cp0_epc);
 369        printk("\n");
 370        set_fs(old_fs);
 371}
 372
 373static DEFINE_RAW_SPINLOCK(die_lock);
 374
 375void __noreturn die(const char *str, struct pt_regs *regs)
 376{
 377        static int die_counter;
 378        int sig = SIGSEGV;
 379
 380        oops_enter();
 381
 382        if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 383                       SIGSEGV) == NOTIFY_STOP)
 384                sig = 0;
 385
 386        console_verbose();
 387        raw_spin_lock_irq(&die_lock);
 388        bust_spinlocks(1);
 389
 390        printk("%s[#%d]:\n", str, ++die_counter);
 391        show_registers(regs);
 392        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 393        raw_spin_unlock_irq(&die_lock);
 394
 395        oops_exit();
 396
 397        if (in_interrupt())
 398                panic("Fatal exception in interrupt");
 399
 400        if (panic_on_oops) {
 401                printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
 402                ssleep(5);
 403                panic("Fatal exception");
 404        }
 405
 406        if (regs && kexec_should_crash(current))
 407                crash_kexec(regs);
 408
 409        do_exit(sig);
 410}
 411
 412extern struct exception_table_entry __start___dbe_table[];
 413extern struct exception_table_entry __stop___dbe_table[];
 414
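     /*
      * Emit an (empty) __dbe_table section from this file; presumably this
      * guarantees the section and its __start/__stop markers exist even when
      * nothing contributes bus-error fixup entries.
      */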
 415__asm__(
 416"       .section        __dbe_table, \"a\"\n"
 417"       .previous                       \n");
 418
 419/* Given an address, look for it in the exception tables. */
 420static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 421{
 422        const struct exception_table_entry *e;
 423
 424        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
 425        if (!e)
 426                e = search_module_dbetables(addr);
 427        return e;
 428}
 429
 430asmlinkage void do_be(struct pt_regs *regs)
 431{
 432        const int field = 2 * sizeof(unsigned long);
 433        const struct exception_table_entry *fixup = NULL;
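             /*
              * ExcCode is 7 (DBE) or 6 (IBE) here, so bit 2 of Cause
              * distinguishes data (non-zero) from instruction bus errors.
              */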
 434        int data = regs->cp0_cause & 4;
 435        int action = MIPS_BE_FATAL;
 436        enum ctx_state prev_state;
 437
 438        prev_state = exception_enter();
 439        /* XXX For now.  Fixme, this searches the wrong table ...  */
 440        if (data && !user_mode(regs))
 441                fixup = search_dbe_tables(exception_epc(regs));
 442
 443        if (fixup)
 444                action = MIPS_BE_FIXUP;
 445
 446        if (board_be_handler)
 447                action = board_be_handler(regs, fixup != NULL);
 448
 449        switch (action) {
 450        case MIPS_BE_DISCARD:
 451                goto out;
 452        case MIPS_BE_FIXUP:
 453                if (fixup) {
 454                        regs->cp0_epc = fixup->nextinsn;
 455                        goto out;
 456                }
 457                break;
 458        default:
 459                break;
 460        }
 461
 462        /*
 463         * Assume it would be too dangerous to continue ...
 464         */
 465        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 466               data ? "Data" : "Instruction",
 467               field, regs->cp0_epc, field, regs->regs[31]);
 468        if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 469                       SIGBUS) == NOTIFY_STOP)
 470                goto out;
 471
 472        die_if_kernel("Oops", regs);
 473        force_sig(SIGBUS, current);
 474
 475out:
 476        exception_exit(prev_state);
 477}
 478
 479/*
 480 * ll/sc, rdhwr, sync emulation
 481 */
 482
 483#define OPCODE 0xfc000000
 484#define BASE   0x03e00000
 485#define RT     0x001f0000
 486#define OFFSET 0x0000ffff
 487#define LL     0xc0000000
 488#define SC     0xe0000000
 489#define SPEC0  0x00000000
 490#define SPEC3  0x7c000000
 491#define RD     0x0000f800
 492#define FUNC   0x0000003f
 493#define SYNC   0x0000000f
 494#define RDHWR  0x0000003b
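     /*
      * Example: the common TLS access sequence "rdhwr $3, $29" encodes as
      * 0x7c03e83b, i.e. SPEC3 | (3 << 16) | (29 << 11) | RDHWR; the masks
      * above let simulate_rdhwr_normal() recover rt = 3 and rd = 29.
      */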
 495
 496/*  microMIPS definitions   */
 497#define MM_POOL32A_FUNC 0xfc00ffff
 498#define MM_RDHWR        0x00006b3c
 499#define MM_RS           0x001f0000
 500#define MM_RT           0x03e00000
 501
 502/*
 503 * The ll_bit is cleared by r*_switch.S
 504 */
 505
 506unsigned int ll_bit;
 507struct task_struct *ll_task;
 508
 509static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 510{
 511        unsigned long value, __user *vaddr;
 512        long offset;
 513
 514        /*
  515         * Analyse the ll instruction that just caused an RI exception
  516         * and put the referenced address into vaddr.
 517         */
 518
 519        /* sign extend offset */
 520        offset = opcode & OFFSET;
 521        offset <<= 16;
 522        offset >>= 16;
 523
 524        vaddr = (unsigned long __user *)
 525                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 526
 527        if ((unsigned long)vaddr & 3)
 528                return SIGBUS;
 529        if (get_user(value, vaddr))
 530                return SIGSEGV;
 531
 532        preempt_disable();
 533
 534        if (ll_task == NULL || ll_task == current) {
 535                ll_bit = 1;
 536        } else {
 537                ll_bit = 0;
 538        }
 539        ll_task = current;
 540
 541        preempt_enable();
 542
 543        regs->regs[(opcode & RT) >> 16] = value;
 544
 545        return 0;
 546}
 547
 548static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 549{
 550        unsigned long __user *vaddr;
 551        unsigned long reg;
 552        long offset;
 553
 554        /*
  555         * Analyse the sc instruction that just caused an RI exception
  556         * and put the referenced address into vaddr.
 557         */
 558
 559        /* sign extend offset */
 560        offset = opcode & OFFSET;
 561        offset <<= 16;
 562        offset >>= 16;
 563
 564        vaddr = (unsigned long __user *)
 565                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 566        reg = (opcode & RT) >> 16;
 567
 568        if ((unsigned long)vaddr & 3)
 569                return SIGBUS;
 570
 571        preempt_disable();
 572
 573        if (ll_bit == 0 || ll_task != current) {
 574                regs->regs[reg] = 0;
 575                preempt_enable();
 576                return 0;
 577        }
 578
 579        preempt_enable();
 580
 581        if (put_user(regs->regs[reg], vaddr))
 582                return SIGSEGV;
 583
 584        regs->regs[reg] = 1;
 585
 586        return 0;
 587}
 588
 589/*
  590 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
  591 * opcodes are supposed to result in coprocessor unusable exceptions when
  592 * executed on ll/sc-less processors.  That's the theory.  In practice a
 593 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 594 * instead, so we're doing the emulation thing in both exception handlers.
 595 */
 596static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 597{
 598        if ((opcode & OPCODE) == LL) {
 599                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 600                                1, regs, 0);
 601                return simulate_ll(regs, opcode);
 602        }
 603        if ((opcode & OPCODE) == SC) {
 604                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 605                                1, regs, 0);
 606                return simulate_sc(regs, opcode);
 607        }
 608
 609        return -1;                      /* Must be something else ... */
 610}
 611
 612/*
 613 * Simulate trapping 'rdhwr' instructions to provide user accessible
 614 * registers not implemented in hardware.
 615 */
 616static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 617{
 618        struct thread_info *ti = task_thread_info(current);
 619
 620        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 621                        1, regs, 0);
 622        switch (rd) {
 623        case 0:         /* CPU number */
 624                regs->regs[rt] = smp_processor_id();
 625                return 0;
 626        case 1:         /* SYNCI length */
 627                regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 628                                     current_cpu_data.icache.linesz);
 629                return 0;
 630        case 2:         /* Read count register */
 631                regs->regs[rt] = read_c0_count();
 632                return 0;
 633        case 3:         /* Count register resolution */
 634                switch (current_cpu_type()) {
 635                case CPU_20KC:
 636                case CPU_25KF:
 637                        regs->regs[rt] = 1;
 638                        break;
 639                default:
 640                        regs->regs[rt] = 2;
 641                }
 642                return 0;
 643        case 29:
 644                regs->regs[rt] = ti->tp_value;
 645                return 0;
 646        default:
 647                return -1;
 648        }
 649}
 650
 651static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 652{
 653        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 654                int rd = (opcode & RD) >> 11;
 655                int rt = (opcode & RT) >> 16;
 656
 657                simulate_rdhwr(regs, rd, rt);
 658                return 0;
 659        }
 660
 661        /* Not ours.  */
 662        return -1;
 663}
 664
 665static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
 666{
 667        if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 668                int rd = (opcode & MM_RS) >> 16;
 669                int rt = (opcode & MM_RT) >> 21;
 670                simulate_rdhwr(regs, rd, rt);
 671                return 0;
 672        }
 673
 674        /* Not ours.  */
 675        return -1;
 676}
 677
 678static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 679{
 680        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 681                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 682                                1, regs, 0);
 683                return 0;
 684        }
 685
 686        return -1;                      /* Must be something else ... */
 687}
 688
 689asmlinkage void do_ov(struct pt_regs *regs)
 690{
 691        enum ctx_state prev_state;
 692        siginfo_t info;
 693
 694        prev_state = exception_enter();
 695        die_if_kernel("Integer overflow", regs);
 696
 697        info.si_code = FPE_INTOVF;
 698        info.si_signo = SIGFPE;
 699        info.si_errno = 0;
 700        info.si_addr = (void __user *) regs->cp0_epc;
 701        force_sig_info(SIGFPE, &info, current);
 702        exception_exit(prev_state);
 703}
 704
 705int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 706{
 707        struct siginfo si = { 0 };
 708
 709        switch (sig) {
 710        case 0:
 711                return 0;
 712
 713        case SIGFPE:
 714                si.si_addr = fault_addr;
 715                si.si_signo = sig;
 716                /*
 717                 * Inexact can happen together with Overflow or Underflow.
 718                 * Respect the mask to deliver the correct exception.
 719                 */
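                     /*
                      * The enable bits sit 5 positions below their matching
                      * cause bits in the FCSR, so shifting the masked enables
                      * left by ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E) lines
                      * each enable up with its cause bit; the mask below then
                      * keeps only causes whose exception is actually enabled.
                      */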
 720                fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
 721                         (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
 722                if (fcr31 & FPU_CSR_INV_X)
 723                        si.si_code = FPE_FLTINV;
 724                else if (fcr31 & FPU_CSR_DIV_X)
 725                        si.si_code = FPE_FLTDIV;
 726                else if (fcr31 & FPU_CSR_OVF_X)
 727                        si.si_code = FPE_FLTOVF;
 728                else if (fcr31 & FPU_CSR_UDF_X)
 729                        si.si_code = FPE_FLTUND;
 730                else if (fcr31 & FPU_CSR_INE_X)
 731                        si.si_code = FPE_FLTRES;
 732                else
 733                        si.si_code = __SI_FAULT;
 734                force_sig_info(sig, &si, current);
 735                return 1;
 736
 737        case SIGBUS:
 738                si.si_addr = fault_addr;
 739                si.si_signo = sig;
 740                si.si_code = BUS_ADRERR;
 741                force_sig_info(sig, &si, current);
 742                return 1;
 743
 744        case SIGSEGV:
 745                si.si_addr = fault_addr;
 746                si.si_signo = sig;
 747                down_read(&current->mm->mmap_sem);
 748                if (find_vma(current->mm, (unsigned long)fault_addr))
 749                        si.si_code = SEGV_ACCERR;
 750                else
 751                        si.si_code = SEGV_MAPERR;
 752                up_read(&current->mm->mmap_sem);
 753                force_sig_info(sig, &si, current);
 754                return 1;
 755
 756        default:
 757                force_sig(sig, current);
 758                return 1;
 759        }
 760}
 761
 762static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 763                       unsigned long old_epc, unsigned long old_ra)
 764{
 765        union mips_instruction inst = { .word = opcode };
 766        void __user *fault_addr;
 767        unsigned long fcr31;
 768        int sig;
 769
 770        /* If it's obviously not an FP instruction, skip it */
 771        switch (inst.i_format.opcode) {
 772        case cop1_op:
 773        case cop1x_op:
 774        case lwc1_op:
 775        case ldc1_op:
 776        case swc1_op:
 777        case sdc1_op:
 778                break;
 779
 780        default:
 781                return -1;
 782        }
 783
 784        /*
 785         * do_ri skipped over the instruction via compute_return_epc, undo
 786         * that for the FPU emulator.
 787         */
 788        regs->cp0_epc = old_epc;
 789        regs->regs[31] = old_ra;
 790
 791        /* Save the FP context to struct thread_struct */
 792        lose_fpu(1);
 793
 794        /* Run the emulator */
 795        sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 796                                       &fault_addr);
 797        fcr31 = current->thread.fpu.fcr31;
 798
 799        /*
 800         * We can't allow the emulated instruction to leave any of
 801         * the cause bits set in $fcr31.
 802         */
 803        current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 804
 805        /* Restore the hardware register state */
 806        own_fpu(1);
 807
 808        /* Send a signal if required.  */
 809        process_fpemu_return(sig, fault_addr, fcr31);
 810
 811        return 0;
 812}
 813
 814/*
 815 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 816 */
 817asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 818{
 819        enum ctx_state prev_state;
 820        void __user *fault_addr;
 821        int sig;
 822
 823        prev_state = exception_enter();
 824        if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 825                       SIGFPE) == NOTIFY_STOP)
 826                goto out;
 827
 828        /* Clear FCSR.Cause before enabling interrupts */
 829        write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
 830        local_irq_enable();
 831
 832        die_if_kernel("FP exception in kernel code", regs);
 833
 834        if (fcr31 & FPU_CSR_UNI_X) {
 835                /*
 836                 * Unimplemented operation exception.  If we've got the full
 837                 * software emulator on-board, let's use it...
 838                 *
 839                 * Force FPU to dump state into task/thread context.  We're
 840                 * moving a lot of data here for what is probably a single
 841                 * instruction, but the alternative is to pre-decode the FP
 842                 * register operands before invoking the emulator, which seems
 843                 * a bit extreme for what should be an infrequent event.
 844                 */
  845                /* Ensure 'resume' does not overwrite the saved FP context again. */
 846                lose_fpu(1);
 847
 848                /* Run the emulator */
 849                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 850                                               &fault_addr);
 851                fcr31 = current->thread.fpu.fcr31;
 852
 853                /*
 854                 * We can't allow the emulated instruction to leave any of
 855                 * the cause bits set in $fcr31.
 856                 */
 857                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 858
 859                /* Restore the hardware register state */
 860                own_fpu(1);     /* Using the FPU again.  */
 861        } else {
 862                sig = SIGFPE;
 863                fault_addr = (void __user *) regs->cp0_epc;
 864        }
 865
 866        /* Send a signal if required.  */
 867        process_fpemu_return(sig, fault_addr, fcr31);
 868
 869out:
 870        exception_exit(prev_state);
 871}
 872
 873void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
 874        const char *str)
 875{
 876        siginfo_t info;
 877        char b[40];
 878
 879#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 880        if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 881                         SIGTRAP) == NOTIFY_STOP)
 882                return;
 883#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 884
 885        if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 886                       SIGTRAP) == NOTIFY_STOP)
 887                return;
 888
 889        /*
 890         * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 891         * insns, even for trap and break codes that indicate arithmetic
 892         * failures.  Weird ...
 893         * But should we continue the brokenness???  --macro
 894         */
 895        switch (code) {
 896        case BRK_OVERFLOW:
 897        case BRK_DIVZERO:
 898                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 899                die_if_kernel(b, regs);
 900                if (code == BRK_DIVZERO)
 901                        info.si_code = FPE_INTDIV;
 902                else
 903                        info.si_code = FPE_INTOVF;
 904                info.si_signo = SIGFPE;
 905                info.si_errno = 0;
 906                info.si_addr = (void __user *) regs->cp0_epc;
 907                force_sig_info(SIGFPE, &info, current);
 908                break;
 909        case BRK_BUG:
 910                die_if_kernel("Kernel bug detected", regs);
 911                force_sig(SIGTRAP, current);
 912                break;
 913        case BRK_MEMU:
 914                /*
 915                 * This breakpoint code is used by the FPU emulator to retake
 916                 * control of the CPU after executing the instruction from the
 917                 * delay slot of an emulated branch.
 918                 *
  919                 * Terminate if the exception was recognized as a delay-slot
  920                 * return; otherwise handle it as normal.
 921                 */
 922                if (do_dsemulret(regs))
 923                        return;
 924
 925                die_if_kernel("Math emu break/trap", regs);
 926                force_sig(SIGTRAP, current);
 927                break;
 928        default:
 929                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 930                die_if_kernel(b, regs);
 931                force_sig(SIGTRAP, current);
 932        }
 933}
 934
 935asmlinkage void do_bp(struct pt_regs *regs)
 936{
 937        unsigned long epc = msk_isa16_mode(exception_epc(regs));
 938        unsigned int opcode, bcode;
 939        enum ctx_state prev_state;
 940        mm_segment_t seg;
 941
 942        seg = get_fs();
 943        if (!user_mode(regs))
 944                set_fs(KERNEL_DS);
 945
 946        prev_state = exception_enter();
 947        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 948        if (get_isa16_mode(regs->cp0_epc)) {
 949                u16 instr[2];
 950
 951                if (__get_user(instr[0], (u16 __user *)epc))
 952                        goto out_sigsegv;
 953
 954                if (!cpu_has_mmips) {
 955                        /* MIPS16e mode */
 956                        bcode = (instr[0] >> 5) & 0x3f;
 957                } else if (mm_insn_16bit(instr[0])) {
 958                        /* 16-bit microMIPS BREAK */
 959                        bcode = instr[0] & 0xf;
 960                } else {
 961                        /* 32-bit microMIPS BREAK */
 962                        if (__get_user(instr[1], (u16 __user *)(epc + 2)))
 963                                goto out_sigsegv;
 964                        opcode = (instr[0] << 16) | instr[1];
 965                        bcode = (opcode >> 6) & ((1 << 20) - 1);
 966                }
 967        } else {
 968                if (__get_user(opcode, (unsigned int __user *)epc))
 969                        goto out_sigsegv;
 970                bcode = (opcode >> 6) & ((1 << 20) - 1);
 971        }
 972
 973        /*
  974         * There is an ancient bug in MIPS assemblers: the break code is
  975         * placed starting at bit 16 instead of bit 6 of the opcode.
  976         * Gas is bug-compatible, but not always, grrr...
  977         * We handle both cases with a simple heuristic.  --macro
 978         */
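             /*
              * E.g. a mis-assembled "break 7" with its code at bit 16 is
              * extracted above as 7 << 10 = 0x1c00 (>= 1 << 10); the swap
              * below recovers bcode == 7.
              */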
 979        if (bcode >= (1 << 10))
 980                bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
 981
 982        /*
  983         * Notify the kprobe handlers if the instruction is likely to
  984         * pertain to them.
 985         */
 986        switch (bcode) {
 987        case BRK_UPROBE:
 988                if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
 989                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 990                        goto out;
 991                else
 992                        break;
 993        case BRK_UPROBE_XOL:
 994                if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
 995                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
 996                        goto out;
 997                else
 998                        break;
 999        case BRK_KPROBE_BP:
1000                if (notify_die(DIE_BREAK, "debug", regs, bcode,
1001                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1002                        goto out;
1003                else
1004                        break;
1005        case BRK_KPROBE_SSTEPBP:
1006                if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1007                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1008                        goto out;
1009                else
1010                        break;
1011        default:
1012                break;
1013        }
1014
1015        do_trap_or_bp(regs, bcode, "Break");
1016
1017out:
1018        set_fs(seg);
1019        exception_exit(prev_state);
1020        return;
1021
1022out_sigsegv:
1023        force_sig(SIGSEGV, current);
1024        goto out;
1025}
1026
1027asmlinkage void do_tr(struct pt_regs *regs)
1028{
1029        u32 opcode, tcode = 0;
1030        enum ctx_state prev_state;
1031        u16 instr[2];
1032        mm_segment_t seg;
1033        unsigned long epc = msk_isa16_mode(exception_epc(regs));
1034
1035        seg = get_fs();
1036        if (!user_mode(regs))
1037                set_fs(get_ds());
1038
1039        prev_state = exception_enter();
1040        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1041        if (get_isa16_mode(regs->cp0_epc)) {
1042                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1043                    __get_user(instr[1], (u16 __user *)(epc + 2)))
1044                        goto out_sigsegv;
1045                opcode = (instr[0] << 16) | instr[1];
1046                /* Immediate versions don't provide a code.  */
1047                if (!(opcode & OPCODE))
1048                        tcode = (opcode >> 12) & ((1 << 4) - 1);
1049        } else {
1050                if (__get_user(opcode, (u32 __user *)epc))
1051                        goto out_sigsegv;
1052                /* Immediate versions don't provide a code.  */
1053                if (!(opcode & OPCODE))
1054                        tcode = (opcode >> 6) & ((1 << 10) - 1);
1055        }
1056
1057        do_trap_or_bp(regs, tcode, "Trap");
1058
1059out:
1060        set_fs(seg);
1061        exception_exit(prev_state);
1062        return;
1063
1064out_sigsegv:
1065        force_sig(SIGSEGV, current);
1066        goto out;
1067}
1068
1069asmlinkage void do_ri(struct pt_regs *regs)
1070{
1071        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1072        unsigned long old_epc = regs->cp0_epc;
1073        unsigned long old31 = regs->regs[31];
1074        enum ctx_state prev_state;
1075        unsigned int opcode = 0;
1076        int status = -1;
1077
1078        /*
1079         * Avoid any kernel code. Just emulate the R2 instruction
1080         * as quickly as possible.
1081         */
1082        if (mipsr2_emulation && cpu_has_mips_r6 &&
1083            likely(user_mode(regs)) &&
1084            likely(get_user(opcode, epc) >= 0)) {
1085                unsigned long fcr31 = 0;
1086
1087                status = mipsr2_decoder(regs, opcode, &fcr31);
1088                switch (status) {
1089                case 0:
1090                case SIGEMT:
1091                        task_thread_info(current)->r2_emul_return = 1;
1092                        return;
1093                case SIGILL:
1094                        goto no_r2_instr;
1095                default:
1096                        process_fpemu_return(status,
1097                                             &current->thread.cp0_baduaddr,
1098                                             fcr31);
1099                        task_thread_info(current)->r2_emul_return = 1;
1100                        return;
1101                }
1102        }
1103
1104no_r2_instr:
1105
1106        prev_state = exception_enter();
1107        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1108
1109        if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1110                       SIGILL) == NOTIFY_STOP)
1111                goto out;
1112
1113        die_if_kernel("Reserved instruction in kernel code", regs);
1114
1115        if (unlikely(compute_return_epc(regs) < 0))
1116                goto out;
1117
1118        if (get_isa16_mode(regs->cp0_epc)) {
1119                unsigned short mmop[2] = { 0 };
1120
1121                if (unlikely(get_user(mmop[0], epc) < 0))
1122                        status = SIGSEGV;
1123                if (unlikely(get_user(mmop[1], epc) < 0))
1124                        status = SIGSEGV;
1125                opcode = (mmop[0] << 16) | mmop[1];
1126
1127                if (status < 0)
1128                        status = simulate_rdhwr_mm(regs, opcode);
1129        } else {
1130                if (unlikely(get_user(opcode, epc) < 0))
1131                        status = SIGSEGV;
1132
1133                if (!cpu_has_llsc && status < 0)
1134                        status = simulate_llsc(regs, opcode);
1135
1136                if (status < 0)
1137                        status = simulate_rdhwr_normal(regs, opcode);
1138
1139                if (status < 0)
1140                        status = simulate_sync(regs, opcode);
1141
1142                if (status < 0)
1143                        status = simulate_fp(regs, opcode, old_epc, old31);
1144        }
1145
1146        if (status < 0)
1147                status = SIGILL;
1148
1149        if (unlikely(status > 0)) {
1150                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
1151                regs->regs[31] = old31;
1152                force_sig(status, current);
1153        }
1154
1155out:
1156        exception_exit(prev_state);
1157}
1158
1159/*
1160 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
1161 * emulated more than some threshold number of instructions, force migration to
1162 * a "CPU" that has FP support.
1163 */
1164static void mt_ase_fp_affinity(void)
1165{
1166#ifdef CONFIG_MIPS_MT_FPAFF
1167        if (mt_fpemul_threshold > 0 &&
1168             ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
1169                /*
1170                 * If there's no FPU present, or if the application has already
1171                 * restricted the allowed set to exclude any CPUs with FPUs,
1172                 * we'll skip the procedure.
1173                 */
1174                if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
1175                        cpumask_t tmask;
1176
1177                        current->thread.user_cpus_allowed
1178                                = current->cpus_allowed;
1179                        cpumask_and(&tmask, &current->cpus_allowed,
1180                                    &mt_fpu_cpumask);
1181                        set_cpus_allowed_ptr(current, &tmask);
1182                        set_thread_flag(TIF_FPUBOUND);
1183                }
1184        }
1185#endif /* CONFIG_MIPS_MT_FPAFF */
1186}
1187
1188/*
1189 * No lock; only written during early bootup by CPU 0.
1190 */
1191static RAW_NOTIFIER_HEAD(cu2_chain);
1192
1193int __ref register_cu2_notifier(struct notifier_block *nb)
1194{
1195        return raw_notifier_chain_register(&cu2_chain, nb);
1196}
1197
1198int cu2_notifier_call_chain(unsigned long val, void *v)
1199{
1200        return raw_notifier_call_chain(&cu2_chain, val, v);
1201}
1202
1203static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1204        void *data)
1205{
1206        struct pt_regs *regs = data;
1207
1208        die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1209                              "instruction", regs);
1210        force_sig(SIGILL, current);
1211
1212        return NOTIFY_OK;
1213}
1214
1215static int wait_on_fp_mode_switch(atomic_t *p)
1216{
1217        /*
1218         * The FP mode for this task is currently being switched. That may
 1219         * involve modifications to the format of this task's FP context which
1220         * make it unsafe to proceed with execution for the moment. Instead,
1221         * schedule some other task.
1222         */
1223        schedule();
1224        return 0;
1225}
1226
1227static int enable_restore_fp_context(int msa)
1228{
1229        int err, was_fpu_owner, prior_msa;
1230
1231        /*
1232         * If an FP mode switch is currently underway, wait for it to
1233         * complete before proceeding.
1234         */
1235        wait_on_atomic_t(&current->mm->context.fp_mode_switching,
1236                         wait_on_fp_mode_switch, TASK_KILLABLE);
1237
1238        if (!used_math()) {
1239                /* First time FP context user. */
1240                preempt_disable();
1241                err = init_fpu();
1242                if (msa && !err) {
1243                        enable_msa();
1244                        _init_msa_upper();
1245                        set_thread_flag(TIF_USEDMSA);
1246                        set_thread_flag(TIF_MSA_CTX_LIVE);
1247                }
1248                preempt_enable();
1249                if (!err)
1250                        set_used_math();
1251                return err;
1252        }
1253
1254        /*
1255         * This task has formerly used the FP context.
1256         *
1257         * If this thread has no live MSA vector context then we can simply
1258         * restore the scalar FP context. If it has live MSA vector context
1259         * (that is, it has or may have used MSA since last performing a
1260         * function call) then we'll need to restore the vector context. This
1261         * applies even if we're currently only executing a scalar FP
1262         * instruction. This is because if we were to later execute an MSA
1263         * instruction then we'd either have to:
1264         *
1265         *  - Restore the vector context & clobber any registers modified by
1266         *    scalar FP instructions between now & then.
1267         *
1268         * or
1269         *
1270         *  - Not restore the vector context & lose the most significant bits
1271         *    of all vector registers.
1272         *
1273         * Neither of those options is acceptable. We cannot restore the least
1274         * significant bits of the registers now & only restore the most
1275         * significant bits later because the most significant bits of any
1276         * vector registers whose aliased FP register is modified now will have
1277         * been zeroed. We'd have no way to know that when restoring the vector
1278         * context & thus may load an outdated value for the most significant
1279         * bits of a vector register.
1280         */
1281        if (!msa && !thread_msa_context_live())
1282                return own_fpu(1);
1283
1284        /*
1285         * This task is using or has previously used MSA. Thus we require
1286         * that Status.FR == 1.
1287         */
1288        preempt_disable();
1289        was_fpu_owner = is_fpu_owner();
1290        err = own_fpu_inatomic(0);
1291        if (err)
1292                goto out;
1293
1294        enable_msa();
1295        write_msa_csr(current->thread.fpu.msacsr);
1296        set_thread_flag(TIF_USEDMSA);
1297
1298        /*
1299         * If this is the first time that the task is using MSA and it has
 1300         * previously used scalar FP in this time slice then we already have
1301         * FP context which we shouldn't clobber. We do however need to clear
1302         * the upper 64b of each vector register so that this task has no
1303         * opportunity to see data left behind by another.
1304         */
1305        prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1306        if (!prior_msa && was_fpu_owner) {
1307                _init_msa_upper();
1308
1309                goto out;
1310        }
1311
1312        if (!prior_msa) {
1313                /*
1314                 * Restore the least significant 64b of each vector register
1315                 * from the existing scalar FP context.
1316                 */
1317                _restore_fp(current);
1318
1319                /*
1320                 * The task has not formerly used MSA, so clear the upper 64b
1321                 * of each vector register such that it cannot see data left
1322                 * behind by another task.
1323                 */
1324                _init_msa_upper();
1325        } else {
1326                /* We need to restore the vector context. */
1327                restore_msa(current);
1328
1329                /* Restore the scalar FP control & status register */
1330                if (!was_fpu_owner)
1331                        write_32bit_cp1_register(CP1_STATUS,
1332                                                 current->thread.fpu.fcr31);
1333        }
1334
1335out:
1336        preempt_enable();
1337
1338        return 0;
1339}
1340
1341asmlinkage void do_cpu(struct pt_regs *regs)
1342{
1343        enum ctx_state prev_state;
1344        unsigned int __user *epc;
1345        unsigned long old_epc, old31;
1346        void __user *fault_addr;
1347        unsigned int opcode;
1348        unsigned long fcr31;
1349        unsigned int cpid;
1350        int status, err;
1351        unsigned long __maybe_unused flags;
1352        int sig;
1353
1354        prev_state = exception_enter();
1355        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
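             /* cpid is the coprocessor number (0..3) taken from Cause.CE. */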
1356
1357        if (cpid != 2)
1358                die_if_kernel("do_cpu invoked from kernel context!", regs);
1359
1360        switch (cpid) {
1361        case 0:
1362                epc = (unsigned int __user *)exception_epc(regs);
1363                old_epc = regs->cp0_epc;
1364                old31 = regs->regs[31];
1365                opcode = 0;
1366                status = -1;
1367
1368                if (unlikely(compute_return_epc(regs) < 0))
1369                        break;
1370
1371                if (get_isa16_mode(regs->cp0_epc)) {
1372                        unsigned short mmop[2] = { 0 };
1373
1374                        if (unlikely(get_user(mmop[0], epc) < 0))
1375                                status = SIGSEGV;
1376                        if (unlikely(get_user(mmop[1], epc) < 0))
1377                                status = SIGSEGV;
1378                        opcode = (mmop[0] << 16) | mmop[1];
1379
1380                        if (status < 0)
1381                                status = simulate_rdhwr_mm(regs, opcode);
1382                } else {
1383                        if (unlikely(get_user(opcode, epc) < 0))
1384                                status = SIGSEGV;
1385
1386                        if (!cpu_has_llsc && status < 0)
1387                                status = simulate_llsc(regs, opcode);
1388
1389                        if (status < 0)
1390                                status = simulate_rdhwr_normal(regs, opcode);
1391                }
1392
1393                if (status < 0)
1394                        status = SIGILL;
1395
1396                if (unlikely(status > 0)) {
1397                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
1398                        regs->regs[31] = old31;
1399                        force_sig(status, current);
1400                }
1401
1402                break;
1403
1404        case 3:
1405                /*
1406                 * The COP3 opcode space and consequently the CP0.Status.CU3
1407                 * bit and the CP0.Cause.CE=3 encoding have been removed as
1408                 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1409                 * up the space has been reused for COP1X instructions, that
1410                 * are enabled by the CP0.Status.CU1 bit and consequently
1411                 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1412                 * exceptions.  Some FPU-less processors that implement one
1413                 * of these ISAs however use this code erroneously for COP1X
1414                 * instructions.  Therefore we redirect this trap to the FP
1415                 * emulator too.
1416                 */
1417                if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1418                        force_sig(SIGILL, current);
1419                        break;
1420                }
1421                /* Fall through.  */
1422
1423        case 1:
1424                err = enable_restore_fp_context(0);
1425
1426                if (raw_cpu_has_fpu && !err)
1427                        break;
1428
1429                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1430                                               &fault_addr);
1431                fcr31 = current->thread.fpu.fcr31;
1432
1433                /*
1434                 * We can't allow the emulated instruction to leave
1435                 * any of the cause bits set in $fcr31.
1436                 */
1437                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
1438
1439                /* Send a signal if required.  */
1440                if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1441                        mt_ase_fp_affinity();
1442
1443                break;
1444
1445        case 2:
1446                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1447                break;
1448        }
1449
1450        exception_exit(prev_state);
1451}
1452
1453asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1454{
1455        enum ctx_state prev_state;
1456
1457        prev_state = exception_enter();
1458        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1459        if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1460                       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1461                goto out;
1462
1463        /* Clear MSACSR.Cause before enabling interrupts */
1464        write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1465        local_irq_enable();
1466
1467        die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1468        force_sig(SIGFPE, current);
1469out:
1470        exception_exit(prev_state);
1471}
1472
1473asmlinkage void do_msa(struct pt_regs *regs)
1474{
1475        enum ctx_state prev_state;
1476        int err;
1477
1478        prev_state = exception_enter();
1479
1480        if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1481                force_sig(SIGILL, current);
1482                goto out;
1483        }
1484
1485        die_if_kernel("do_msa invoked from kernel context!", regs);
1486
1487        err = enable_restore_fp_context(1);
1488        if (err)
1489                force_sig(SIGILL, current);
1490out:
1491        exception_exit(prev_state);
1492}
1493
1494asmlinkage void do_mdmx(struct pt_regs *regs)
1495{
1496        enum ctx_state prev_state;
1497
1498        prev_state = exception_enter();
1499        force_sig(SIGILL, current);
1500        exception_exit(prev_state);
1501}
1502
1503/*
1504 * Called with interrupts disabled.
1505 */
1506asmlinkage void do_watch(struct pt_regs *regs)
1507{
1508        enum ctx_state prev_state;
1509        u32 cause;
1510
1511        prev_state = exception_enter();
1512        /*
 1513         * Clear the WP bit (bit 22) of the Cause register so we don't loop
1514         * forever.
1515         */
1516        cause = read_c0_cause();
1517        cause &= ~(1 << 22);
1518        write_c0_cause(cause);
1519
1520        /*
1521         * If the current thread has the watch registers loaded, save
1522         * their values and send SIGTRAP.  Otherwise another thread
1523         * left the registers set, clear them and continue.
1524         */
1525        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1526                mips_read_watch_registers();
1527                local_irq_enable();
1528                force_sig(SIGTRAP, current);
1529        } else {
1530                mips_clear_watch_registers();
1531                local_irq_enable();
1532        }
1533        exception_exit(prev_state);
1534}
1535
1536asmlinkage void do_mcheck(struct pt_regs *regs)
1537{
1538        int multi_match = regs->cp0_status & ST0_TS;
1539        enum ctx_state prev_state;
1540        mm_segment_t old_fs = get_fs();
1541
1542        prev_state = exception_enter();
1543        show_regs(regs);
1544
1545        if (multi_match) {
1546                dump_tlb_regs();
1547                pr_info("\n");
1548                dump_tlb_all();
1549        }
1550
1551        if (!user_mode(regs))
1552                set_fs(KERNEL_DS);
1553
1554        show_code((unsigned int __user *) regs->cp0_epc);
1555
1556        set_fs(old_fs);
1557
1558        /*
1559         * Some chips may have other causes of machine check (e.g. SB1
1560         * graduation timer)
1561         */
1562        panic("Caught Machine Check exception - %scaused by multiple "
1563              "matching entries in the TLB.",
1564              (multi_match) ? "" : "not ");
1565}
1566
1567asmlinkage void do_mt(struct pt_regs *regs)
1568{
1569        int subcode;
1570
1571        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1572                        >> VPECONTROL_EXCPT_SHIFT;
1573        switch (subcode) {
1574        case 0:
1575                printk(KERN_DEBUG "Thread Underflow\n");
1576                break;
1577        case 1:
1578                printk(KERN_DEBUG "Thread Overflow\n");
1579                break;
1580        case 2:
1581                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1582                break;
1583        case 3:
1584                printk(KERN_DEBUG "Gating Storage Exception\n");
1585                break;
1586        case 4:
1587                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1588                break;
1589        case 5:
1590                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1591                break;
1592        default:
1593                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1594                        subcode);
1595                break;
1596        }
1597        die_if_kernel("MIPS MT Thread exception in kernel", regs);
1598
1599        force_sig(SIGILL, current);
1600}
1601
1602
1603asmlinkage void do_dsp(struct pt_regs *regs)
1604{
1605        if (cpu_has_dsp)
1606                panic("Unexpected DSP exception");
1607
1608        force_sig(SIGILL, current);
1609}
1610
1611asmlinkage void do_reserved(struct pt_regs *regs)
1612{
1613        /*
1614         * Game over - no way to handle this if it ever occurs.  Most probably
1615         * caused by a new unknown cpu type or by another deadly
1616         * hardware/software error.
1617         */
1618        show_regs(regs);
1619        panic("Caught reserved exception %ld - should not happen.",
1620              (regs->cp0_cause & 0x7f) >> 2);
1621}
1622
1623static int __initdata l1parity = 1;
1624static int __init nol1parity(char *s)
1625{
1626        l1parity = 0;
1627        return 1;
1628}
1629__setup("nol1par", nol1parity);
1630static int __initdata l2parity = 1;
1631static int __init nol2parity(char *s)
1632{
1633        l2parity = 0;
1634        return 1;
1635}
1636__setup("nol2par", nol2parity);
1637
1638/*
1639 * Some MIPS CPUs can enable/disable cache parity detection, but they do
1640 * it in different ways.
1641 */
1642static inline void parity_protection_init(void)
1643{
1644        switch (current_cpu_type()) {
1645        case CPU_24K:
1646        case CPU_34K:
1647        case CPU_74K:
1648        case CPU_1004K:
1649        case CPU_1074K:
1650        case CPU_INTERAPTIV:
1651        case CPU_PROAPTIV:
1652        case CPU_P5600:
1653        case CPU_QEMU_GENERIC:
1654        case CPU_I6400:
1655                {
1656#define ERRCTL_PE       0x80000000
1657#define ERRCTL_L2P      0x00800000
1658                        unsigned long errctl;
1659                        unsigned int l1parity_present, l2parity_present;
1660
1661                        errctl = read_c0_ecc();
1662                        errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1663
1664                        /* probe L1 parity support */
1665                        write_c0_ecc(errctl | ERRCTL_PE);
1666                        back_to_back_c0_hazard();
1667                        l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1668
1669                        /* probe L2 parity support */
1670                        write_c0_ecc(errctl|ERRCTL_L2P);
1671                        back_to_back_c0_hazard();
1672                        l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1673
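                            /*
                             * The read-back above tells us which parity controls are
                             * actually wired up.  When both levels are present, L2P is
                             * only set if the requested L1 and L2 states differ, which
                             * matches the errctl ^= ERRCTL_L2P fixup before the L2
                             * message below.
                             */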
1674                        if (l1parity_present && l2parity_present) {
1675                                if (l1parity)
1676                                        errctl |= ERRCTL_PE;
1677                                if (l1parity ^ l2parity)
1678                                        errctl |= ERRCTL_L2P;
1679                        } else if (l1parity_present) {
1680                                if (l1parity)
1681                                        errctl |= ERRCTL_PE;
1682                        } else if (l2parity_present) {
1683                                if (l2parity)
1684                                        errctl |= ERRCTL_L2P;
1685                        } else {
1686                                /* No parity available */
1687                        }
1688
1689                        printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1690
1691                        write_c0_ecc(errctl);
1692                        back_to_back_c0_hazard();
1693                        errctl = read_c0_ecc();
1694                        printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1695
1696                        if (l1parity_present)
1697                                printk(KERN_INFO "Cache parity protection %sabled\n",
1698                                       (errctl & ERRCTL_PE) ? "en" : "dis");
1699
1700                        if (l2parity_present) {
1701                                if (l1parity_present && l1parity)
1702                                        errctl ^= ERRCTL_L2P;
1703                                printk(KERN_INFO "L2 cache parity protection %sabled\n",
1704                                       (errctl & ERRCTL_L2P) ? "en" : "dis");
1705                        }
1706                }
1707                break;
1708
1709        case CPU_5KC:
1710        case CPU_5KE:
1711        case CPU_LOONGSON1:
1712                write_c0_ecc(0x80000000);
1713                back_to_back_c0_hazard();
1714                /* Set the PE bit (bit 31) in the c0_errctl register. */
1715                printk(KERN_INFO "Cache parity protection %sabled\n",
1716                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1717                break;
1718        case CPU_20KC:
1719        case CPU_25KF:
1720                /* Clear the DE bit (bit 16) in the c0_status register. */
1721                printk(KERN_INFO "Enable cache parity protection for "
1722                       "MIPS 20KC/25KF CPUs.\n");
1723                clear_c0_status(ST0_DE);
1724                break;
1725        default:
1726                break;
1727        }
1728}
1729
1730asmlinkage void cache_parity_error(void)
1731{
1732        const int field = 2 * sizeof(unsigned long);
1733        unsigned int reg_val;
1734
1735        /* For the moment, report the problem and hang. */
1736        printk("Cache error exception:\n");
1737        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1738        reg_val = read_c0_cacheerr();
1739        printk("c0_cacheerr == %08x\n", reg_val);
1740
1741        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1742               reg_val & (1<<30) ? "secondary" : "primary",
1743               reg_val & (1<<31) ? "data" : "insn");
1744        if ((cpu_has_mips_r2_r6) &&
1745            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1746                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1747                        reg_val & (1<<29) ? "ED " : "",
1748                        reg_val & (1<<28) ? "ET " : "",
1749                        reg_val & (1<<27) ? "ES " : "",
1750                        reg_val & (1<<26) ? "EE " : "",
1751                        reg_val & (1<<25) ? "EB " : "",
1752                        reg_val & (1<<24) ? "EI " : "",
1753                        reg_val & (1<<23) ? "E1 " : "",
1754                        reg_val & (1<<22) ? "E0 " : "");
1755        } else {
1756                pr_err("Error bits: %s%s%s%s%s%s%s\n",
1757                        reg_val & (1<<29) ? "ED " : "",
1758                        reg_val & (1<<28) ? "ET " : "",
1759                        reg_val & (1<<26) ? "EE " : "",
1760                        reg_val & (1<<25) ? "EB " : "",
1761                        reg_val & (1<<24) ? "EI " : "",
1762                        reg_val & (1<<23) ? "E1 " : "",
1763                        reg_val & (1<<22) ? "E0 " : "");
1764        }
1765        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1766
1767#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1768        if (reg_val & (1<<22))
1769                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1770
1771        if (reg_val & (1<<23))
1772                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1773#endif
1774
1775        panic("Can't handle the cache error!");
1776}
1777
1778asmlinkage void do_ftlb(void)
1779{
1780        const int field = 2 * sizeof(unsigned long);
1781        unsigned int reg_val;
1782
1783        /* For the moment, report the problem and hang. */
1784        if ((cpu_has_mips_r2_r6) &&
1785            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1786                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1787                       read_c0_ecc());
1788                pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1789                reg_val = read_c0_cacheerr();
1790                pr_err("c0_cacheerr == %08x\n", reg_val);
1791
1792                if ((reg_val & 0xc0000000) == 0xc0000000) {
1793                        pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1794                } else {
1795                        pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1796                               reg_val & (1<<30) ? "secondary" : "primary",
1797                               reg_val & (1<<31) ? "data" : "insn");
1798                }
1799        } else {
1800                pr_err("FTLB error exception\n");
1801        }
1802        /* Just print the cacheerr bits for now */
1803        cache_parity_error();
1804}
1805
1806/*
1807 * SDBBP EJTAG debug exception handler.
1808 * We skip the SDBBP instruction and continue at the following one.
1809 */
1810void ejtag_exception_handler(struct pt_regs *regs)
1811{
1812        const int field = 2 * sizeof(unsigned long);
1813        unsigned long depc, old_epc, old_ra;
1814        unsigned int debug;
1815
1816        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1817        depc = read_c0_depc();
1818        debug = read_c0_debug();
1819        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1820        if (debug & 0x80000000) {
1821                /*
1822                 * In branch delay slot.
1823                 * We cheat a little bit here and use EPC to calculate the
1824                 * debug return address (DEPC). EPC is restored after the
1825                 * calculation.
1826                 */
1827                old_epc = regs->cp0_epc;
1828                old_ra = regs->regs[31];
1829                regs->cp0_epc = depc;
1830                compute_return_epc(regs);
1831                depc = regs->cp0_epc;
1832                regs->cp0_epc = old_epc;
1833                regs->regs[31] = old_ra;
1834        } else
1835                depc += 4;
1836        write_c0_depc(depc);
1837
1838#if 0
1839        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1840        write_c0_debug(debug | 0x100);
1841#endif
1842}
1843
1844/*
1845 * NMI exception handler.
1846 * No lock; only written during early bootup by CPU 0.
1847 */
1848static RAW_NOTIFIER_HEAD(nmi_chain);
1849
1850int register_nmi_notifier(struct notifier_block *nb)
1851{
1852        return raw_notifier_chain_register(&nmi_chain, nb);
1853}
1854
1855void __noreturn nmi_exception_handler(struct pt_regs *regs)
1856{
1857        char str[100];
1858
1859        raw_notifier_call_chain(&nmi_chain, 0, regs);
1860        bust_spinlocks(1);
1861        snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1862                 smp_processor_id(), regs->cp0_epc);
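            /*
             * An NMI is taken through the reset/NMI vector, so the interrupted
             * PC is in ErrorEPC; substitute it so die() reports that address.
             */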
1863        regs->cp0_epc = read_c0_errorepc();
1864        die(str, regs);
1865}
1866
1867#define VECTORSPACING 0x100     /* for EI/VI mode */
1868
1869unsigned long ebase;
1870unsigned long exception_handlers[32];
1871unsigned long vi_handlers[64];
1872
1873void __init *set_except_vector(int n, void *addr)
1874{
1875        unsigned long handler = (unsigned long) addr;
1876        unsigned long old_handler;
1877
1878#ifdef CONFIG_CPU_MICROMIPS
1879        /*
1880         * Only the TLB handlers are cache aligned with an even
1881         * address. All other handlers are on an odd address and
1882         * require no modification. Otherwise, MIPS32 mode will
1883         * be entered when handling any TLB exceptions. That
1884         * would be bad...since we must stay in microMIPS mode.
1885         */
1886        if (!(handler & 0x1))
1887                handler |= 1;
1888#endif
1889        old_handler = xchg(&exception_handlers[n], handler);
1890
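            /*
             * With Cause.IV set, interrupts (exception 0) are dispatched via
             * ebase + 0x200, so plant a jump to the new handler there: a plain
             * j if the target is within range, otherwise load the address into
             * k0 and jr through it.
             */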
1891        if (n == 0 && cpu_has_divec) {
1892#ifdef CONFIG_CPU_MICROMIPS
1893                unsigned long jump_mask = ~((1 << 27) - 1);
1894#else
1895                unsigned long jump_mask = ~((1 << 28) - 1);
1896#endif
1897                u32 *buf = (u32 *)(ebase + 0x200);
1898                unsigned int k0 = 26;
1899                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1900                        uasm_i_j(&buf, handler & ~jump_mask);
1901                        uasm_i_nop(&buf);
1902                } else {
1903                        UASM_i_LA(&buf, k0, handler);
1904                        uasm_i_jr(&buf, k0);
1905                        uasm_i_nop(&buf);
1906                }
1907                local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1908        }
1909        return (void *)old_handler;
1910}
1911
1912static void do_default_vi(void)
1913{
1914        show_regs(get_irq_regs());
1915        panic("Caught unexpected vectored interrupt.");
1916}
1917
1918static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1919{
1920        unsigned long handler;
1921        unsigned long old_handler = vi_handlers[n];
1922        int srssets = current_cpu_data.srsets;
1923        u16 *h;
1924        unsigned char *b;
1925
1926        BUG_ON(!cpu_has_veic && !cpu_has_vint);
1927
1928        if (addr == NULL) {
1929                handler = (unsigned long) do_default_vi;
1930                srs = 0;
1931        } else
1932                handler = (unsigned long) addr;
1933        vi_handlers[n] = handler;
1934
1935        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1936
1937        if (srs >= srssets)
1938                panic("Shadow register set %d not supported", srs);
1939
1940        if (cpu_has_veic) {
1941                if (board_bind_eic_interrupt)
1942                        board_bind_eic_interrupt(n, srs);
1943        } else if (cpu_has_vint) {
1944                /* SRSMap is only defined if shadow sets are implemented */
1945                if (srssets > 1)
1946                        change_c0_srsmap(0xf << n*4, srs << n*4);
1947        }
1948
1949        if (srs == 0) {
1950                /*
1951                 * If no shadow set is selected then use the default handler
1952                 * that does normal register saving and standard interrupt exit
1953                 */
1954                extern char except_vec_vi, except_vec_vi_lui;
1955                extern char except_vec_vi_ori, except_vec_vi_end;
1956                extern char rollback_except_vec_vi;
1957                char *vec_start = using_rollback_handler() ?
1958                        &rollback_except_vec_vi : &except_vec_vi;
1959#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
1960                const int lui_offset = &except_vec_vi_lui - vec_start + 2;
1961                const int ori_offset = &except_vec_vi_ori - vec_start + 2;
1962#else
1963                const int lui_offset = &except_vec_vi_lui - vec_start;
1964                const int ori_offset = &except_vec_vi_ori - vec_start;
1965#endif
1966                const int handler_len = &except_vec_vi_end - vec_start;
1967
1968                if (handler_len > VECTORSPACING) {
1969                        /*
1970                         * Sigh... panicking won't help as the console
1971                         * is probably not configured :(
1972                         */
1973                        panic("VECTORSPACING too small");
1974                }
1975
1976                set_handler(((unsigned long)b - ebase), vec_start,
1977#ifdef CONFIG_CPU_MICROMIPS
1978                                (handler_len - 1));
1979#else
1980                                handler_len);
1981#endif
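                    /*
                     * Patch the copied stub so its lui/ori pair loads the address
                     * of the actual handler.
                     */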
1982                h = (u16 *)(b + lui_offset);
1983                *h = (handler >> 16) & 0xffff;
1984                h = (u16 *)(b + ori_offset);
1985                *h = (handler & 0xffff);
1986                local_flush_icache_range((unsigned long)b,
1987                                         (unsigned long)(b+handler_len));
1988        }
1989        else {
1990                /*
1991                 * In other cases jump directly to the interrupt handler. It
1992                 * is the handler's responsibility to save registers if required
1993                 * (eg hi/lo) and return from the exception using "eret".
1994                 */
1995                u32 insn;
1996
1997                h = (u16 *)b;
1998                /* j handler */
1999#ifdef CONFIG_CPU_MICROMIPS
2000                insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2001#else
2002                insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2003#endif
2004                h[0] = (insn >> 16) & 0xffff;
2005                h[1] = insn & 0xffff;
2006                h[2] = 0;
2007                h[3] = 0;
2008                local_flush_icache_range((unsigned long)b,
2009                                         (unsigned long)(b+8));
2010        }
2011
2012        return (void *)old_handler;
2013}
2014
2015void *set_vi_handler(int n, vi_handler_t addr)
2016{
2017        return set_vi_srs_handler(n, addr, 0);
2018}
2019
2020extern void tlb_init(void);
2021
2022/*
2023 * Timer interrupt
2024 */
2025int cp0_compare_irq;
2026EXPORT_SYMBOL_GPL(cp0_compare_irq);
2027int cp0_compare_irq_shift;
2028
2029/*
2030 * Performance counter IRQ or -1 if shared with timer
2031 */
2032int cp0_perfcount_irq;
2033EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2034
2035/*
2036 * Fast debug channel IRQ or -1 if not present
2037 */
2038int cp0_fdc_irq;
2039EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2040
2041static int noulri;
2042
2043static int __init ulri_disable(char *s)
2044{
2045        pr_info("Disabling ulri\n");
2046        noulri = 1;
2047
2048        return 1;
2049}
2050__setup("noulri", ulri_disable);
2051
2052/* configure STATUS register */
2053static void configure_status(void)
2054{
2055        /*
2056         * Disable coprocessors and select 32-bit or 64-bit addressing
2057         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2058         * flag that some firmware may have left set and the TS bit (for
2059         * IP27).  Set XX for ISA IV code to work.
2060         */
2061        unsigned int status_set = ST0_CU0;
2062#ifdef CONFIG_64BIT
2063        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2064#endif
2065        if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2066                status_set |= ST0_XX;
2067        if (cpu_has_dsp)
2068                status_set |= ST0_MX;
2069
2070        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2071                         status_set);
2072}
2073
2074/* configure HWRENA register */
2075static void configure_hwrena(void)
2076{
2077        unsigned int hwrena = cpu_hwrena_impl_bits;
2078
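            /*
             * R2 and later: let user-mode RDHWR read the CPUNum, SYNCI_Step,
             * CC and CCRes registers.
             */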
2079        if (cpu_has_mips_r2_r6)
2080                hwrena |= 0x0000000f;
2081
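            /* Bit 29 lets user-mode RDHWR read the UserLocal (TLS) register. */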
2082        if (!noulri && cpu_has_userlocal)
2083                hwrena |= (1 << 29);
2084
2085        if (hwrena)
2086                write_c0_hwrena(hwrena);
2087}
2088
2089static void configure_exception_vector(void)
2090{
2091        if (cpu_has_veic || cpu_has_vint) {
2092                unsigned long sr = set_c0_status(ST0_BEV);
2093                write_c0_ebase(ebase);
2094                write_c0_status(sr);
2095                /* Setting vector spacing enables EI/VI mode  */
2096                change_c0_intctl(0x3e0, VECTORSPACING);
2097        }
2098        if (cpu_has_divec) {
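                    /*
                     * On MT cores the other VPEs are quiesced (dvpe/evpe) while
                     * Cause.IV is being set.
                     */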
2099                if (cpu_has_mipsmt) {
2100                        unsigned int vpflags = dvpe();
2101                        set_c0_cause(CAUSEF_IV);
2102                        evpe(vpflags);
2103                } else
2104                        set_c0_cause(CAUSEF_IV);
2105        }
2106}
2107
2108void per_cpu_trap_init(bool is_boot_cpu)
2109{
2110        unsigned int cpu = smp_processor_id();
2111
2112        configure_status();
2113        configure_hwrena();
2114
2115        configure_exception_vector();
2116
2117        /*
2118         * Before R2 these interrupt numbers were fixed to 7, so on R2 only:
2119         *
2120         *  o read IntCtl.IPTI to determine the timer interrupt
2121         *  o read IntCtl.IPPCI to determine the performance counter interrupt
2122         *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2123         */
2124        if (cpu_has_mips_r2_r6) {
2125                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2126                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2127                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2128                cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
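                    /* An IPFDC field of 0 means no FDC interrupt is wired up. */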
2129                if (!cp0_fdc_irq)
2130                        cp0_fdc_irq = -1;
2131
2132        } else {
2133                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2134                cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2135                cp0_perfcount_irq = -1;
2136                cp0_fdc_irq = -1;
2137        }
2138
2139        if (!cpu_data[cpu].asid_cache)
2140                cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
2141
2142        atomic_inc(&init_mm.mm_count);
2143        current->active_mm = &init_mm;
2144        BUG_ON(current->mm);
2145        enter_lazy_tlb(&init_mm, current);
2146
2147        /* Boot CPU's cache setup in setup_arch(). */
2148        if (!is_boot_cpu)
2149                cpu_cache_init();
2150        tlb_init();
2151        TLBMISS_HANDLER_SETUP();
2152}
2153
2154/* Install CPU exception handler */
2155void set_handler(unsigned long offset, void *addr, unsigned long size)
2156{
2157#ifdef CONFIG_CPU_MICROMIPS
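            /*
             * microMIPS handler symbols carry the ISA mode bit; strip it to get
             * the real byte address of the code being copied.
             */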
2158        memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2159#else
2160        memcpy((void *)(ebase + offset), addr, size);
2161#endif
2162        local_flush_icache_range(ebase + offset, ebase + offset + size);
2163}
2164
2165static char panic_null_cerr[] =
2166        "Trying to set NULL cache error exception handler";
2167
2168/*
2169 * Install uncached CPU exception handler.
2170 * This is suitable only for the cache error exception which is the only
2171 * exception handler that is being run uncached.
2172 */
2173void set_uncached_handler(unsigned long offset, void *addr,
2174        unsigned long size)
2175{
2176        unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2177
2178        if (!addr)
2179                panic(panic_null_cerr);
2180
2181        memcpy((void *)(uncached_ebase + offset), addr, size);
2182}
2183
2184static int __initdata rdhwr_noopt;
2185static int __init set_rdhwr_noopt(char *str)
2186{
2187        rdhwr_noopt = 1;
2188        return 1;
2189}
2190
2191__setup("rdhwr_noopt", set_rdhwr_noopt);
2192
2193void __init trap_init(void)
2194{
2195        extern char except_vec3_generic;
2196        extern char except_vec4;
2197        extern char except_vec3_r4000;
2198        unsigned long i;
2199
2200        check_wait();
2201
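            /*
             * Vectored interrupt mode needs room for up to 64 vectors spaced
             * VECTORSPACING apart above the general vector at 0x200, so carve
             * out a suitably sized and aligned exception base.
             */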
2202        if (cpu_has_veic || cpu_has_vint) {
2203                unsigned long size = 0x200 + VECTORSPACING*64;
2204                ebase = (unsigned long)
2205                        __alloc_bootmem(size, 1 << fls(size), 0);
2206        } else {
2207#ifdef CONFIG_KVM_GUEST
2208#define KVM_GUEST_KSEG0     0x40000000
2209                ebase = KVM_GUEST_KSEG0;
2210#else
2211                ebase = CKSEG0;
2212#endif
2213                if (cpu_has_mips_r2_r6)
2214                        ebase += (read_c0_ebase() & 0x3ffff000);
2215        }
2216
2217        if (cpu_has_mmips) {
2218                unsigned int config3 = read_c0_config3();
2219
2220                if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2221                        write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2222                else
2223                        write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2224        }
2225
2226        if (board_ebase_setup)
2227                board_ebase_setup();
2228        per_cpu_trap_init(true);
2229
2230        /*
2231         * Copy the generic exception handlers to their final destination.
2232         * This will be overridden later as suitable for a particular
2233         * configuration.
2234         */
2235        set_handler(0x180, &except_vec3_generic, 0x80);
2236
2237        /*
2238         * Set up default vectors
2239         */
2240        for (i = 0; i <= 31; i++)
2241                set_except_vector(i, handle_reserved);
2242
2243        /*
2244         * Copy the EJTAG debug exception vector handler code to its final
2245         * destination.
2246         */
2247        if (cpu_has_ejtag && board_ejtag_handler_setup)
2248                board_ejtag_handler_setup();
2249
2250        /*
2251         * Only some CPUs have the watch exceptions.
2252         */
2253        if (cpu_has_watch)
2254                set_except_vector(23, handle_watch);
2255
2256        /*
2257         * Initialise interrupt handlers
2258         */
2259        if (cpu_has_veic || cpu_has_vint) {
2260                int nvec = cpu_has_veic ? 64 : 8;
2261                for (i = 0; i < nvec; i++)
2262                        set_vi_handler(i, NULL);
2263        }
2264        else if (cpu_has_divec)
2265                set_handler(0x200, &except_vec4, 0x8);
2266
2267        /*
2268         * Some CPUs can enable/disable cache parity detection, but they do
2269         * it in different ways.
2270         */
2271        parity_protection_init();
2272
2273        /*
2274         * The Data Bus Errors / Instruction Bus Errors are signaled
2275         * by external hardware.  Therefore these two exceptions
2276         * may have board specific handlers.
2277         */
2278        if (board_be_init)
2279                board_be_init();
2280
2281        set_except_vector(0, using_rollback_handler() ? rollback_handle_int
2282                                                      : handle_int);
2283        set_except_vector(1, handle_tlbm);
2284        set_except_vector(2, handle_tlbl);
2285        set_except_vector(3, handle_tlbs);
2286
2287        set_except_vector(4, handle_adel);
2288        set_except_vector(5, handle_ades);
2289
2290        set_except_vector(6, handle_ibe);
2291        set_except_vector(7, handle_dbe);
2292
2293        set_except_vector(8, handle_sys);
2294        set_except_vector(9, handle_bp);
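            /*
             * For the RI exception, unless rdhwr_noopt was given, install a
             * handler that emulates the RDHWR instruction (the fast TLS access
             * path) before falling back to the generic RI handling.
             */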
2295        set_except_vector(10, rdhwr_noopt ? handle_ri :
2296                          (cpu_has_vtag_icache ?
2297                           handle_ri_rdhwr_vivt : handle_ri_rdhwr));
2298        set_except_vector(11, handle_cpu);
2299        set_except_vector(12, handle_ov);
2300        set_except_vector(13, handle_tr);
2301        set_except_vector(14, handle_msa_fpe);
2302
2303        if (current_cpu_type() == CPU_R6000 ||
2304            current_cpu_type() == CPU_R6000A) {
2305                /*
2306                 * The R6000 is the only R-series CPU that features a machine
2307                 * check exception (similar to the R4000 cache error) and
2308                 * unaligned ldc1/sdc1 exception.  The handlers have not been
2309                 * written yet.  Well, anyway there is no R6000 machine on the
2310                 * current list of targets for Linux/MIPS.
2311                 * (Duh, crap, there is someone with a triple R6k machine)
2312                 */
2313                //set_except_vector(14, handle_mc);
2314                //set_except_vector(15, handle_ndc);
2315        }
2316
2317
2318        if (board_nmi_handler_setup)
2319                board_nmi_handler_setup();
2320
2321        if (cpu_has_fpu && !cpu_has_nofpuex)
2322                set_except_vector(15, handle_fpe);
2323
2324        set_except_vector(16, handle_ftlb);
2325
2326        if (cpu_has_rixiex) {
2327                set_except_vector(19, tlb_do_page_fault_0);
2328                set_except_vector(20, tlb_do_page_fault_0);
2329        }
2330
2331        set_except_vector(21, handle_msa);
2332        set_except_vector(22, handle_mdmx);
2333
2334        if (cpu_has_mcheck)
2335                set_except_vector(24, handle_mcheck);
2336
2337        if (cpu_has_mipsmt)
2338                set_except_vector(25, handle_mt);
2339
2340        set_except_vector(26, handle_dsp);
2341
2342        if (board_cache_error_setup)
2343                board_cache_error_setup();
2344
2345        if (cpu_has_vce)
2346                /* Special exception: R4[04]00 also uses the divec space. */
2347                set_handler(0x180, &except_vec3_r4000, 0x100);
2348        else if (cpu_has_4kex)
2349                set_handler(0x180, &except_vec3_generic, 0x80);
2350        else
2351                set_handler(0x080, &except_vec3_generic, 0x80);
2352
2353        local_flush_icache_range(ebase, ebase + 0x400);
2354
2355        sort_extable(__start___dbe_table, __stop___dbe_table);
2356
2357        cu2_notifier(default_cu2_call, 0x80000000);     /* Run last  */
2358}
2359
2360static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2361                            void *v)
2362{
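            /*
             * CP0 state may have been lost across a deep power-down, so redo
             * the per-CPU trap setup when the CPU comes back up.
             */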
2363        switch (cmd) {
2364        case CPU_PM_ENTER_FAILED:
2365        case CPU_PM_EXIT:
2366                configure_status();
2367                configure_hwrena();
2368                configure_exception_vector();
2369
2370                /* Restore register with CPU number for TLB handlers */
2371                TLBMISS_HANDLER_RESTORE();
2372
2373                break;
2374        }
2375
2376        return NOTIFY_OK;
2377}
2378
2379static struct notifier_block trap_pm_notifier_block = {
2380        .notifier_call = trap_pm_notifier,
2381};
2382
2383static int __init trap_pm_init(void)
2384{
2385        return cpu_pm_register_notifier(&trap_pm_notifier_block);
2386}
2387arch_initcall(trap_pm_init);
2388