linux/arch/mips/kernel/traps.c
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched/mm.h>
  27#include <linux/sched/debug.h>
  28#include <linux/smp.h>
  29#include <linux/spinlock.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memblock.h>
  32#include <linux/interrupt.h>
  33#include <linux/ptrace.h>
  34#include <linux/kgdb.h>
  35#include <linux/kdebug.h>
  36#include <linux/kprobes.h>
  37#include <linux/notifier.h>
  38#include <linux/kdb.h>
  39#include <linux/irq.h>
  40#include <linux/perf_event.h>
  41
  42#include <asm/addrspace.h>
  43#include <asm/bootinfo.h>
  44#include <asm/branch.h>
  45#include <asm/break.h>
  46#include <asm/cop2.h>
  47#include <asm/cpu.h>
  48#include <asm/cpu-type.h>
  49#include <asm/dsp.h>
  50#include <asm/fpu.h>
  51#include <asm/fpu_emulator.h>
  52#include <asm/idle.h>
  53#include <asm/isa-rev.h>
  54#include <asm/mips-cps.h>
  55#include <asm/mips-r2-to-r6-emul.h>
  56#include <asm/mipsregs.h>
  57#include <asm/mipsmtregs.h>
  58#include <asm/module.h>
  59#include <asm/msa.h>
  60#include <asm/pgtable.h>
  61#include <asm/ptrace.h>
  62#include <asm/sections.h>
  63#include <asm/siginfo.h>
  64#include <asm/tlbdebug.h>
  65#include <asm/traps.h>
  66#include <linux/uaccess.h>
  67#include <asm/watch.h>
  68#include <asm/mmu_context.h>
  69#include <asm/types.h>
  70#include <asm/stacktrace.h>
  71#include <asm/tlbex.h>
  72#include <asm/uasm.h>
  73
  74extern void check_wait(void);
  75extern asmlinkage void rollback_handle_int(void);
  76extern asmlinkage void handle_int(void);
  77extern asmlinkage void handle_adel(void);
  78extern asmlinkage void handle_ades(void);
  79extern asmlinkage void handle_ibe(void);
  80extern asmlinkage void handle_dbe(void);
  81extern asmlinkage void handle_sys(void);
  82extern asmlinkage void handle_bp(void);
  83extern asmlinkage void handle_ri(void);
  84extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  85extern asmlinkage void handle_ri_rdhwr(void);
  86extern asmlinkage void handle_cpu(void);
  87extern asmlinkage void handle_ov(void);
  88extern asmlinkage void handle_tr(void);
  89extern asmlinkage void handle_msa_fpe(void);
  90extern asmlinkage void handle_fpe(void);
  91extern asmlinkage void handle_ftlb(void);
  92extern asmlinkage void handle_msa(void);
  93extern asmlinkage void handle_mdmx(void);
  94extern asmlinkage void handle_watch(void);
  95extern asmlinkage void handle_mt(void);
  96extern asmlinkage void handle_dsp(void);
  97extern asmlinkage void handle_mcheck(void);
  98extern asmlinkage void handle_reserved(void);
  99extern void tlb_do_page_fault_0(void);
 100
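/*
 * Board/platform hooks: platform code may install these (typically from its
 * setup code) to customize bus error handling, NMI/EJTAG/cache error setup,
 * EIC interrupt binding and the exception base configuration.
 */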
 101void (*board_be_init)(void);
 102int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 103void (*board_nmi_handler_setup)(void);
 104void (*board_ejtag_handler_setup)(void);
 105void (*board_bind_eic_interrupt)(int irq, int regset);
 106void (*board_ebase_setup)(void);
 107void(*board_cache_error_setup)(void);
 108
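/*
 * Walk the raw stack words upwards from reg29 ($sp) and print every value
 * that looks like a kernel text address.  This is the fallback used when a
 * proper unwind is not possible or "raw_show_trace" was given.
 */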
 109static void show_raw_backtrace(unsigned long reg29)
 110{
 111        unsigned long *sp = (unsigned long *)(reg29 & ~3);
 112        unsigned long addr;
 113
 114        printk("Call Trace:");
 115#ifdef CONFIG_KALLSYMS
 116        printk("\n");
 117#endif
 118        while (!kstack_end(sp)) {
 119                unsigned long __user *p =
 120                        (unsigned long __user *)(unsigned long)sp++;
 121                if (__get_user(addr, p)) {
 122                        printk(" (Bad stack address)");
 123                        break;
 124                }
 125                if (__kernel_text_address(addr))
 126                        print_ip_sym(addr);
 127        }
 128        printk("\n");
 129}
 130
 131#ifdef CONFIG_KALLSYMS
 132int raw_show_trace;
 133static int __init set_raw_show_trace(char *str)
 134{
 135        raw_show_trace = 1;
 136        return 1;
 137}
 138__setup("raw_show_trace", set_raw_show_trace);
 139#endif
 140
 141static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
 142{
 143        unsigned long sp = regs->regs[29];
 144        unsigned long ra = regs->regs[31];
 145        unsigned long pc = regs->cp0_epc;
 146
 147        if (!task)
 148                task = current;
 149
 150        if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 151                show_raw_backtrace(sp);
 152                return;
 153        }
 154        printk("Call Trace:\n");
 155        do {
 156                print_ip_sym(pc);
 157                pc = unwind_stack(task, &sp, pc, &ra);
 158        } while (pc);
 159        pr_cont("\n");
 160}
 161
 162/*
 163 * This routine abuses get_user()/put_user() to reference pointers
 164 * with at least a bit of error checking ...
 165 */
 166static void show_stacktrace(struct task_struct *task,
 167        const struct pt_regs *regs)
 168{
 169        const int field = 2 * sizeof(unsigned long);
 170        long stackdata;
 171        int i;
 172        unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
 173
 174        printk("Stack :");
 175        i = 0;
 176        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 177                if (i && ((i % (64 / field)) == 0)) {
 178                        pr_cont("\n");
 179                        printk("       ");
 180                }
 181                if (i > 39) {
 182                        pr_cont(" ...");
 183                        break;
 184                }
 185
 186                if (__get_user(stackdata, sp++)) {
 187                        pr_cont(" (Bad stack address)");
 188                        break;
 189                }
 190
 191                pr_cont(" %0*lx", field, stackdata);
 192                i++;
 193        }
 194        pr_cont("\n");
 195        show_backtrace(task, regs);
 196}
 197
 198void show_stack(struct task_struct *task, unsigned long *sp)
 199{
 200        struct pt_regs regs;
 201        mm_segment_t old_fs = get_fs();
 202
 203        regs.cp0_status = KSU_KERNEL;
 204        if (sp) {
 205                regs.regs[29] = (unsigned long)sp;
 206                regs.regs[31] = 0;
 207                regs.cp0_epc = 0;
 208        } else {
 209                if (task && task != current) {
 210                        regs.regs[29] = task->thread.reg29;
 211                        regs.regs[31] = 0;
 212                        regs.cp0_epc = task->thread.reg31;
 213#ifdef CONFIG_KGDB_KDB
 214                } else if (atomic_read(&kgdb_active) != -1 &&
 215                           kdb_current_regs) {
 216                        memcpy(&regs, kdb_current_regs, sizeof(regs));
 217#endif /* CONFIG_KGDB_KDB */
 218                } else {
 219                        prepare_frametrace(&regs);
 220                }
 221        }
 222        /*
 223         * show_stack() deals exclusively with kernel mode, so be sure to access
 224         * the stack in the kernel (not user) address space.
 225         */
 226        set_fs(KERNEL_DS);
 227        show_stacktrace(task, &regs);
 228        set_fs(old_fs);
 229}
 230
 231static void show_code(unsigned int __user *pc)
 232{
 233        long i;
 234        unsigned short __user *pc16 = NULL;
 235
 236        printk("Code:");
 237
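        /* Bit 0 of EPC set means compressed (MIPS16e/microMIPS) code. */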
 238        if ((unsigned long)pc & 1)
 239                pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
 240        for(i = -3 ; i < 6 ; i++) {
 241                unsigned int insn;
 242                if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
 243                        pr_cont(" (Bad address in epc)\n");
 244                        break;
 245                }
 246                pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
 247        }
 248        pr_cont("\n");
 249}
 250
 251static void __show_regs(const struct pt_regs *regs)
 252{
 253        const int field = 2 * sizeof(unsigned long);
 254        unsigned int cause = regs->cp0_cause;
 255        unsigned int exccode;
 256        int i;
 257
 258        show_regs_print_info(KERN_DEFAULT);
 259
 260        /*
 261         * Saved main processor registers
 262         */
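        /*
         * $0 is hardwired to zero and $26/$27 (k0/k1) are reserved for the
         * exception handlers, so no meaningful saved value is printed for
         * them below.
         */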
 263        for (i = 0; i < 32; ) {
 264                if ((i % 4) == 0)
 265                        printk("$%2d   :", i);
 266                if (i == 0)
 267                        pr_cont(" %0*lx", field, 0UL);
 268                else if (i == 26 || i == 27)
 269                        pr_cont(" %*s", field, "");
 270                else
 271                        pr_cont(" %0*lx", field, regs->regs[i]);
 272
 273                i++;
 274                if ((i % 4) == 0)
 275                        pr_cont("\n");
 276        }
 277
 278#ifdef CONFIG_CPU_HAS_SMARTMIPS
 279        printk("Acx    : %0*lx\n", field, regs->acx);
 280#endif
 281        if (MIPS_ISA_REV < 6) {
 282                printk("Hi    : %0*lx\n", field, regs->hi);
 283                printk("Lo    : %0*lx\n", field, regs->lo);
 284        }
 285
 286        /*
 287         * Saved cp0 registers
 288         */
 289        printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 290               (void *) regs->cp0_epc);
 291        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 292               (void *) regs->regs[31]);
 293
 294        printk("Status: %08x    ", (uint32_t) regs->cp0_status);
 295
 296        if (cpu_has_3kex) {
 297                if (regs->cp0_status & ST0_KUO)
 298                        pr_cont("KUo ");
 299                if (regs->cp0_status & ST0_IEO)
 300                        pr_cont("IEo ");
 301                if (regs->cp0_status & ST0_KUP)
 302                        pr_cont("KUp ");
 303                if (regs->cp0_status & ST0_IEP)
 304                        pr_cont("IEp ");
 305                if (regs->cp0_status & ST0_KUC)
 306                        pr_cont("KUc ");
 307                if (regs->cp0_status & ST0_IEC)
 308                        pr_cont("IEc ");
 309        } else if (cpu_has_4kex) {
 310                if (regs->cp0_status & ST0_KX)
 311                        pr_cont("KX ");
 312                if (regs->cp0_status & ST0_SX)
 313                        pr_cont("SX ");
 314                if (regs->cp0_status & ST0_UX)
 315                        pr_cont("UX ");
 316                switch (regs->cp0_status & ST0_KSU) {
 317                case KSU_USER:
 318                        pr_cont("USER ");
 319                        break;
 320                case KSU_SUPERVISOR:
 321                        pr_cont("SUPERVISOR ");
 322                        break;
 323                case KSU_KERNEL:
 324                        pr_cont("KERNEL ");
 325                        break;
 326                default:
 327                        pr_cont("BAD_MODE ");
 328                        break;
 329                }
 330                if (regs->cp0_status & ST0_ERL)
 331                        pr_cont("ERL ");
 332                if (regs->cp0_status & ST0_EXL)
 333                        pr_cont("EXL ");
 334                if (regs->cp0_status & ST0_IE)
 335                        pr_cont("IE ");
 336        }
 337        pr_cont("\n");
 338
 339        exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 340        printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 341
 342        if (1 <= exccode && exccode <= 5)
 343                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 344
 345        printk("PrId  : %08x (%s)\n", read_c0_prid(),
 346               cpu_name_string());
 347}
 348
 349/*
 350 * FIXME: really the generic show_regs should take a const pointer argument.
 351 */
 352void show_regs(struct pt_regs *regs)
 353{
 354        __show_regs(regs);
 355        dump_stack();
 356}
 357
 358void show_registers(struct pt_regs *regs)
 359{
 360        const int field = 2 * sizeof(unsigned long);
 361        mm_segment_t old_fs = get_fs();
 362
 363        __show_regs(regs);
 364        print_modules();
 365        printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 366               current->comm, current->pid, current_thread_info(), current,
 367              field, current_thread_info()->tp_value);
 368        if (cpu_has_userlocal) {
 369                unsigned long tls;
 370
 371                tls = read_c0_userlocal();
 372                if (tls != current_thread_info()->tp_value)
 373                        printk("*HwTLS: %0*lx\n", field, tls);
 374        }
 375
 376        if (!user_mode(regs))
 377                /* Necessary for getting the correct stack content */
 378                set_fs(KERNEL_DS);
 379        show_stacktrace(current, regs);
 380        show_code((unsigned int __user *) regs->cp0_epc);
 381        printk("\n");
 382        set_fs(old_fs);
 383}
 384
 385static DEFINE_RAW_SPINLOCK(die_lock);
 386
 387void __noreturn die(const char *str, struct pt_regs *regs)
 388{
 389        static int die_counter;
 390        int sig = SIGSEGV;
 391
 392        oops_enter();
 393
 394        if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 395                       SIGSEGV) == NOTIFY_STOP)
 396                sig = 0;
 397
 398        console_verbose();
 399        raw_spin_lock_irq(&die_lock);
 400        bust_spinlocks(1);
 401
 402        printk("%s[#%d]:\n", str, ++die_counter);
 403        show_registers(regs);
 404        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 405        raw_spin_unlock_irq(&die_lock);
 406
 407        oops_exit();
 408
 409        if (in_interrupt())
 410                panic("Fatal exception in interrupt");
 411
 412        if (panic_on_oops)
 413                panic("Fatal exception");
 414
 415        if (regs && kexec_should_crash(current))
 416                crash_kexec(regs);
 417
 418        do_exit(sig);
 419}
 420
 421extern struct exception_table_entry __start___dbe_table[];
 422extern struct exception_table_entry __stop___dbe_table[];
 423
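/*
 * Emit an (empty) __dbe_table section so that the __start/__stop symbols
 * above always resolve, even when no bus error fixup entries are generated.
 */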
 424__asm__(
 425"       .section        __dbe_table, \"a\"\n"
 426"       .previous                       \n");
 427
 428/* Given an address, look for it in the exception tables. */
 429static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 430{
 431        const struct exception_table_entry *e;
 432
 433        e = search_extable(__start___dbe_table,
 434                           __stop___dbe_table - __start___dbe_table, addr);
 435        if (!e)
 436                e = search_module_dbetables(addr);
 437        return e;
 438}
 439
 440asmlinkage void do_be(struct pt_regs *regs)
 441{
 442        const int field = 2 * sizeof(unsigned long);
 443        const struct exception_table_entry *fixup = NULL;
 444        int data = regs->cp0_cause & 4;
 445        int action = MIPS_BE_FATAL;
 446        enum ctx_state prev_state;
 447
 448        prev_state = exception_enter();
 449        /* XXX For now.  Fixme, this searches the wrong table ...  */
 450        if (data && !user_mode(regs))
 451                fixup = search_dbe_tables(exception_epc(regs));
 452
 453        if (fixup)
 454                action = MIPS_BE_FIXUP;
 455
 456        if (board_be_handler)
 457                action = board_be_handler(regs, fixup != NULL);
 458        else
 459                mips_cm_error_report();
 460
 461        switch (action) {
 462        case MIPS_BE_DISCARD:
 463                goto out;
 464        case MIPS_BE_FIXUP:
 465                if (fixup) {
 466                        regs->cp0_epc = fixup->nextinsn;
 467                        goto out;
 468                }
 469                break;
 470        default:
 471                break;
 472        }
 473
 474        /*
 475         * Assume it would be too dangerous to continue ...
 476         */
 477        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 478               data ? "Data" : "Instruction",
 479               field, regs->cp0_epc, field, regs->regs[31]);
 480        if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 481                       SIGBUS) == NOTIFY_STOP)
 482                goto out;
 483
 484        die_if_kernel("Oops", regs);
 485        force_sig(SIGBUS);
 486
 487out:
 488        exception_exit(prev_state);
 489}
 490
 491/*
 492 * ll/sc, rdhwr, sync emulation
 493 */
 494
 495#define OPCODE 0xfc000000
 496#define BASE   0x03e00000
 497#define RT     0x001f0000
 498#define OFFSET 0x0000ffff
 499#define LL     0xc0000000
 500#define SC     0xe0000000
 501#define SPEC0  0x00000000
 502#define SPEC3  0x7c000000
 503#define RD     0x0000f800
 504#define FUNC   0x0000003f
 505#define SYNC   0x0000000f
 506#define RDHWR  0x0000003b
 507
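/*
 * Example of the classic I-type field layout these masks extract: the
 * instruction "ll $t0, 16($a1)" encodes as 0xc0a80010, i.e. opcode (bits
 * 31:26) = LL, base (25:21) = 5/$a1, rt (20:16) = 8/$t0, offset (15:0) = 16.
 */
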
 508/*  microMIPS definitions   */
 509#define MM_POOL32A_FUNC 0xfc00ffff
 510#define MM_RDHWR        0x00006b3c
 511#define MM_RS           0x001f0000
 512#define MM_RT           0x03e00000
 513
 514/*
 515 * The ll_bit is cleared by r*_switch.S
 516 */
 517
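/*
 * ll_bit and ll_task implement the software link/reservation used to emulate
 * ll/sc: simulate_ll() takes the reservation for the current task and
 * simulate_sc() only succeeds if it is still held, i.e. neither a context
 * switch nor another task's ll intervened.
 */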
 518unsigned int ll_bit;
 519struct task_struct *ll_task;
 520
 521static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 522{
 523        unsigned long value, __user *vaddr;
 524        long offset;
 525
 526        /*
  527         * Analyse the ll instruction that just caused an RI exception
  528         * and compute the referenced address into vaddr.
 529         */
 530
 531        /* sign extend offset */
 532        offset = opcode & OFFSET;
 533        offset <<= 16;
 534        offset >>= 16;
 535
 536        vaddr = (unsigned long __user *)
 537                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 538
 539        if ((unsigned long)vaddr & 3)
 540                return SIGBUS;
 541        if (get_user(value, vaddr))
 542                return SIGSEGV;
 543
 544        preempt_disable();
 545
 546        if (ll_task == NULL || ll_task == current) {
 547                ll_bit = 1;
 548        } else {
 549                ll_bit = 0;
 550        }
 551        ll_task = current;
 552
 553        preempt_enable();
 554
 555        regs->regs[(opcode & RT) >> 16] = value;
 556
 557        return 0;
 558}
 559
 560static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 561{
 562        unsigned long __user *vaddr;
 563        unsigned long reg;
 564        long offset;
 565
 566        /*
  567         * Analyse the sc instruction that just caused an RI exception
  568         * and compute the referenced address into vaddr.
 569         */
 570
 571        /* sign extend offset */
 572        offset = opcode & OFFSET;
 573        offset <<= 16;
 574        offset >>= 16;
 575
 576        vaddr = (unsigned long __user *)
 577                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 578        reg = (opcode & RT) >> 16;
 579
 580        if ((unsigned long)vaddr & 3)
 581                return SIGBUS;
 582
 583        preempt_disable();
 584
 585        if (ll_bit == 0 || ll_task != current) {
 586                regs->regs[reg] = 0;
 587                preempt_enable();
 588                return 0;
 589        }
 590
 591        preempt_enable();
 592
 593        if (put_user(regs->regs[reg], vaddr))
 594                return SIGSEGV;
 595
 596        regs->regs[reg] = 1;
 597
 598        return 0;
 599}
 600
 601/*
  602 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 603 * opcodes are supposed to result in coprocessor unusable exceptions if
 604 * executed on ll/sc-less processors.  That's the theory.  In practice a
 605 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 606 * instead, so we're doing the emulation thing in both exception handlers.
 607 */
 608static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 609{
 610        if ((opcode & OPCODE) == LL) {
 611                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 612                                1, regs, 0);
 613                return simulate_ll(regs, opcode);
 614        }
 615        if ((opcode & OPCODE) == SC) {
 616                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 617                                1, regs, 0);
 618                return simulate_sc(regs, opcode);
 619        }
 620
 621        return -1;                      /* Must be something else ... */
 622}
 623
 624/*
 625 * Simulate trapping 'rdhwr' instructions to provide user accessible
 626 * registers not implemented in hardware.
 627 */
 628static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 629{
 630        struct thread_info *ti = task_thread_info(current);
 631
 632        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 633                        1, regs, 0);
 634        switch (rd) {
 635        case MIPS_HWR_CPUNUM:           /* CPU number */
 636                regs->regs[rt] = smp_processor_id();
 637                return 0;
 638        case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
 639                regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 640                                     current_cpu_data.icache.linesz);
 641                return 0;
 642        case MIPS_HWR_CC:               /* Read count register */
 643                regs->regs[rt] = read_c0_count();
 644                return 0;
 645        case MIPS_HWR_CCRES:            /* Count register resolution */
 646                switch (current_cpu_type()) {
 647                case CPU_20KC:
 648                case CPU_25KF:
 649                        regs->regs[rt] = 1;
 650                        break;
 651                default:
 652                        regs->regs[rt] = 2;
 653                }
 654                return 0;
 655        case MIPS_HWR_ULR:              /* Read UserLocal register */
 656                regs->regs[rt] = ti->tp_value;
 657                return 0;
 658        default:
 659                return -1;
 660        }
 661}
 662
 663static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 664{
 665        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 666                int rd = (opcode & RD) >> 11;
 667                int rt = (opcode & RT) >> 16;
 668
 669                simulate_rdhwr(regs, rd, rt);
 670                return 0;
 671        }
 672
 673        /* Not ours.  */
 674        return -1;
 675}
 676
 677static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 678{
 679        if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 680                int rd = (opcode & MM_RS) >> 16;
 681                int rt = (opcode & MM_RT) >> 21;
 682                simulate_rdhwr(regs, rd, rt);
 683                return 0;
 684        }
 685
 686        /* Not ours.  */
 687        return -1;
 688}
 689
 690static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 691{
 692        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 693                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 694                                1, regs, 0);
 695                return 0;
 696        }
 697
 698        return -1;                      /* Must be something else ... */
 699}
 700
 701asmlinkage void do_ov(struct pt_regs *regs)
 702{
 703        enum ctx_state prev_state;
 704
 705        prev_state = exception_enter();
 706        die_if_kernel("Integer overflow", regs);
 707
 708        force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
 709        exception_exit(prev_state);
 710}
 711
 712#ifdef CONFIG_MIPS_FP_SUPPORT
 713
 714/*
 715 * Send SIGFPE according to FCSR Cause bits, which must have already
  716 * been masked against Enable bits.  This is important as Inexact can
 717 * happen together with Overflow or Underflow, and `ptrace' can set
 718 * any bits.
 719 */
 720void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 721                     struct task_struct *tsk)
 722{
 723        int si_code = FPE_FLTUNK;
 724
 725        if (fcr31 & FPU_CSR_INV_X)
 726                si_code = FPE_FLTINV;
 727        else if (fcr31 & FPU_CSR_DIV_X)
 728                si_code = FPE_FLTDIV;
 729        else if (fcr31 & FPU_CSR_OVF_X)
 730                si_code = FPE_FLTOVF;
 731        else if (fcr31 & FPU_CSR_UDF_X)
 732                si_code = FPE_FLTUND;
 733        else if (fcr31 & FPU_CSR_INE_X)
 734                si_code = FPE_FLTRES;
 735
 736        force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
 737}
 738
 739int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 740{
 741        int si_code;
 742        struct vm_area_struct *vma;
 743
 744        switch (sig) {
 745        case 0:
 746                return 0;
 747
 748        case SIGFPE:
 749                force_fcr31_sig(fcr31, fault_addr, current);
 750                return 1;
 751
 752        case SIGBUS:
 753                force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 754                return 1;
 755
 756        case SIGSEGV:
 757                down_read(&current->mm->mmap_sem);
 758                vma = find_vma(current->mm, (unsigned long)fault_addr);
 759                if (vma && (vma->vm_start <= (unsigned long)fault_addr))
 760                        si_code = SEGV_ACCERR;
 761                else
 762                        si_code = SEGV_MAPERR;
 763                up_read(&current->mm->mmap_sem);
 764                force_sig_fault(SIGSEGV, si_code, fault_addr);
 765                return 1;
 766
 767        default:
 768                force_sig(sig);
 769                return 1;
 770        }
 771}
 772
 773static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 774                       unsigned long old_epc, unsigned long old_ra)
 775{
 776        union mips_instruction inst = { .word = opcode };
 777        void __user *fault_addr;
 778        unsigned long fcr31;
 779        int sig;
 780
 781        /* If it's obviously not an FP instruction, skip it */
 782        switch (inst.i_format.opcode) {
 783        case cop1_op:
 784        case cop1x_op:
 785        case lwc1_op:
 786        case ldc1_op:
 787        case swc1_op:
 788        case sdc1_op:
 789                break;
 790
 791        default:
 792                return -1;
 793        }
 794
 795        /*
 796         * do_ri skipped over the instruction via compute_return_epc, undo
 797         * that for the FPU emulator.
 798         */
 799        regs->cp0_epc = old_epc;
 800        regs->regs[31] = old_ra;
 801
 802        /* Run the emulator */
 803        sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 804                                       &fault_addr);
 805
 806        /*
 807         * We can't allow the emulated instruction to leave any
 808         * enabled Cause bits set in $fcr31.
 809         */
 810        fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 811        current->thread.fpu.fcr31 &= ~fcr31;
 812
 813        /* Restore the hardware register state */
 814        own_fpu(1);
 815
 816        /* Send a signal if required.  */
 817        process_fpemu_return(sig, fault_addr, fcr31);
 818
 819        return 0;
 820}
 821
 822/*
 823 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 824 */
 825asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 826{
 827        enum ctx_state prev_state;
 828        void __user *fault_addr;
 829        int sig;
 830
 831        prev_state = exception_enter();
 832        if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 833                       SIGFPE) == NOTIFY_STOP)
 834                goto out;
 835
 836        /* Clear FCSR.Cause before enabling interrupts */
 837        write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 838        local_irq_enable();
 839
 840        die_if_kernel("FP exception in kernel code", regs);
 841
 842        if (fcr31 & FPU_CSR_UNI_X) {
 843                /*
 844                 * Unimplemented operation exception.  If we've got the full
 845                 * software emulator on-board, let's use it...
 846                 *
 847                 * Force FPU to dump state into task/thread context.  We're
 848                 * moving a lot of data here for what is probably a single
 849                 * instruction, but the alternative is to pre-decode the FP
 850                 * register operands before invoking the emulator, which seems
 851                 * a bit extreme for what should be an infrequent event.
 852                 */
 853
 854                /* Run the emulator */
 855                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 856                                               &fault_addr);
 857
 858                /*
 859                 * We can't allow the emulated instruction to leave any
 860                 * enabled Cause bits set in $fcr31.
 861                 */
 862                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 863                current->thread.fpu.fcr31 &= ~fcr31;
 864
 865                /* Restore the hardware register state */
 866                own_fpu(1);     /* Using the FPU again.  */
 867        } else {
 868                sig = SIGFPE;
 869                fault_addr = (void __user *) regs->cp0_epc;
 870        }
 871
 872        /* Send a signal if required.  */
 873        process_fpemu_return(sig, fault_addr, fcr31);
 874
 875out:
 876        exception_exit(prev_state);
 877}
 878
 879/*
 880 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 881 * emulated more than some threshold number of instructions, force migration to
 882 * a "CPU" that has FP support.
 883 */
 884static void mt_ase_fp_affinity(void)
 885{
 886#ifdef CONFIG_MIPS_MT_FPAFF
 887        if (mt_fpemul_threshold > 0 &&
 888             ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
 889                /*
 890                 * If there's no FPU present, or if the application has already
 891                 * restricted the allowed set to exclude any CPUs with FPUs,
 892                 * we'll skip the procedure.
 893                 */
 894                if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
 895                        cpumask_t tmask;
 896
 897                        current->thread.user_cpus_allowed
 898                                = current->cpus_mask;
 899                        cpumask_and(&tmask, &current->cpus_mask,
 900                                    &mt_fpu_cpumask);
 901                        set_cpus_allowed_ptr(current, &tmask);
 902                        set_thread_flag(TIF_FPUBOUND);
 903                }
 904        }
 905#endif /* CONFIG_MIPS_MT_FPAFF */
 906}
 907
 908#else /* !CONFIG_MIPS_FP_SUPPORT */
 909
 910static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 911                       unsigned long old_epc, unsigned long old_ra)
 912{
 913        return -1;
 914}
 915
 916#endif /* !CONFIG_MIPS_FP_SUPPORT */
 917
 918void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 919        const char *str)
 920{
 921        char b[40];
 922
 923#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 924        if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 925                         SIGTRAP) == NOTIFY_STOP)
 926                return;
 927#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 928
 929        if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 930                       SIGTRAP) == NOTIFY_STOP)
 931                return;
 932
 933        /*
 934         * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 935         * insns, even for trap and break codes that indicate arithmetic
 936         * failures.  Weird ...
 937         * But should we continue the brokenness???  --macro
 938         */
 939        switch (code) {
 940        case BRK_OVERFLOW:
 941        case BRK_DIVZERO:
 942                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 943                die_if_kernel(b, regs);
 944                force_sig_fault(SIGFPE,
 945                                code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
 946                                (void __user *) regs->cp0_epc);
 947                break;
 948        case BRK_BUG:
 949                die_if_kernel("Kernel bug detected", regs);
 950                force_sig(SIGTRAP);
 951                break;
 952        case BRK_MEMU:
 953                /*
 954                 * This breakpoint code is used by the FPU emulator to retake
 955                 * control of the CPU after executing the instruction from the
 956                 * delay slot of an emulated branch.
 957                 *
  958                 * Terminate if the exception was recognized as a delay slot
  959                 * return; otherwise handle it normally.
 960                 */
 961                if (do_dsemulret(regs))
 962                        return;
 963
 964                die_if_kernel("Math emu break/trap", regs);
 965                force_sig(SIGTRAP);
 966                break;
 967        default:
 968                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 969                die_if_kernel(b, regs);
 970                if (si_code) {
 971                        force_sig_fault(SIGTRAP, si_code, NULL);
 972                } else {
 973                        force_sig(SIGTRAP);
 974                }
 975        }
 976}
 977
 978asmlinkage void do_bp(struct pt_regs *regs)
 979{
 980        unsigned long epc = msk_isa16_mode(exception_epc(regs));
 981        unsigned int opcode, bcode;
 982        enum ctx_state prev_state;
 983        mm_segment_t seg;
 984
 985        seg = get_fs();
 986        if (!user_mode(regs))
 987                set_fs(KERNEL_DS);
 988
 989        prev_state = exception_enter();
 990        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
 991        if (get_isa16_mode(regs->cp0_epc)) {
 992                u16 instr[2];
 993
 994                if (__get_user(instr[0], (u16 __user *)epc))
 995                        goto out_sigsegv;
 996
 997                if (!cpu_has_mmips) {
 998                        /* MIPS16e mode */
 999                        bcode = (instr[0] >> 5) & 0x3f;
1000                } else if (mm_insn_16bit(instr[0])) {
1001                        /* 16-bit microMIPS BREAK */
1002                        bcode = instr[0] & 0xf;
1003                } else {
1004                        /* 32-bit microMIPS BREAK */
1005                        if (__get_user(instr[1], (u16 __user *)(epc + 2)))
1006                                goto out_sigsegv;
1007                        opcode = (instr[0] << 16) | instr[1];
1008                        bcode = (opcode >> 6) & ((1 << 20) - 1);
1009                }
1010        } else {
1011                if (__get_user(opcode, (unsigned int __user *)epc))
1012                        goto out_sigsegv;
1013                bcode = (opcode >> 6) & ((1 << 20) - 1);
1014        }
1015
1016        /*
 1017         * There is an ancient bug in MIPS assemblers: the break code is
 1018         * encoded starting at bit 16 instead of bit 6 of the opcode.
 1019         * Gas is bug-compatible, but not always, grrr...
 1020         * We handle both cases with a simple heuristic.  --macro
1021         */
1022        if (bcode >= (1 << 10))
1023                bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1024
1025        /*
 1026         * Notify the kprobe handlers if the instruction is likely to
 1027         * pertain to them.
1028         */
1029        switch (bcode) {
1030        case BRK_UPROBE:
1031                if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1032                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1033                        goto out;
1034                else
1035                        break;
1036        case BRK_UPROBE_XOL:
1037                if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1038                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1039                        goto out;
1040                else
1041                        break;
1042        case BRK_KPROBE_BP:
1043                if (notify_die(DIE_BREAK, "debug", regs, bcode,
1044                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1045                        goto out;
1046                else
1047                        break;
1048        case BRK_KPROBE_SSTEPBP:
1049                if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1050                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1051                        goto out;
1052                else
1053                        break;
1054        default:
1055                break;
1056        }
1057
1058        do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1059
1060out:
1061        set_fs(seg);
1062        exception_exit(prev_state);
1063        return;
1064
1065out_sigsegv:
1066        force_sig(SIGSEGV);
1067        goto out;
1068}
1069
1070asmlinkage void do_tr(struct pt_regs *regs)
1071{
1072        u32 opcode, tcode = 0;
1073        enum ctx_state prev_state;
1074        u16 instr[2];
1075        mm_segment_t seg;
1076        unsigned long epc = msk_isa16_mode(exception_epc(regs));
1077
1078        seg = get_fs();
1079        if (!user_mode(regs))
1080                set_fs(KERNEL_DS);
1081
1082        prev_state = exception_enter();
1083        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1084        if (get_isa16_mode(regs->cp0_epc)) {
1085                if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
1086                    __get_user(instr[1], (u16 __user *)(epc + 2)))
1087                        goto out_sigsegv;
1088                opcode = (instr[0] << 16) | instr[1];
1089                /* Immediate versions don't provide a code.  */
1090                if (!(opcode & OPCODE))
1091                        tcode = (opcode >> 12) & ((1 << 4) - 1);
1092        } else {
1093                if (__get_user(opcode, (u32 __user *)epc))
1094                        goto out_sigsegv;
1095                /* Immediate versions don't provide a code.  */
1096                if (!(opcode & OPCODE))
1097                        tcode = (opcode >> 6) & ((1 << 10) - 1);
1098        }
1099
1100        do_trap_or_bp(regs, tcode, 0, "Trap");
1101
1102out:
1103        set_fs(seg);
1104        exception_exit(prev_state);
1105        return;
1106
1107out_sigsegv:
1108        force_sig(SIGSEGV);
1109        goto out;
1110}
1111
1112asmlinkage void do_ri(struct pt_regs *regs)
1113{
1114        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1115        unsigned long old_epc = regs->cp0_epc;
1116        unsigned long old31 = regs->regs[31];
1117        enum ctx_state prev_state;
1118        unsigned int opcode = 0;
1119        int status = -1;
1120
1121        /*
1122         * Avoid any kernel code. Just emulate the R2 instruction
1123         * as quickly as possible.
1124         */
1125        if (mipsr2_emulation && cpu_has_mips_r6 &&
1126            likely(user_mode(regs)) &&
1127            likely(get_user(opcode, epc) >= 0)) {
1128                unsigned long fcr31 = 0;
1129
1130                status = mipsr2_decoder(regs, opcode, &fcr31);
1131                switch (status) {
1132                case 0:
1133                case SIGEMT:
1134                        return;
1135                case SIGILL:
1136                        goto no_r2_instr;
1137                default:
1138                        process_fpemu_return(status,
1139                                             &current->thread.cp0_baduaddr,
1140                                             fcr31);
1141                        return;
1142                }
1143        }
1144
1145no_r2_instr:
1146
1147        prev_state = exception_enter();
1148        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1149
1150        if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1151                       SIGILL) == NOTIFY_STOP)
1152                goto out;
1153
1154        die_if_kernel("Reserved instruction in kernel code", regs);
1155
1156        if (unlikely(compute_return_epc(regs) < 0))
1157                goto out;
1158
1159        if (!get_isa16_mode(regs->cp0_epc)) {
1160                if (unlikely(get_user(opcode, epc) < 0))
1161                        status = SIGSEGV;
1162
1163                if (!cpu_has_llsc && status < 0)
1164                        status = simulate_llsc(regs, opcode);
1165
1166                if (status < 0)
1167                        status = simulate_rdhwr_normal(regs, opcode);
1168
1169                if (status < 0)
1170                        status = simulate_sync(regs, opcode);
1171
1172                if (status < 0)
1173                        status = simulate_fp(regs, opcode, old_epc, old31);
1174        } else if (cpu_has_mmips) {
1175                unsigned short mmop[2] = { 0 };
1176
1177                if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1178                        status = SIGSEGV;
1179                if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1180                        status = SIGSEGV;
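                /* Reassemble the 32-bit microMIPS instruction from its halfwords */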
1181                opcode = mmop[0];
1182                opcode = (opcode << 16) | mmop[1];
1183
1184                if (status < 0)
1185                        status = simulate_rdhwr_mm(regs, opcode);
1186        }
1187
1188        if (status < 0)
1189                status = SIGILL;
1190
1191        if (unlikely(status > 0)) {
1192                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
1193                regs->regs[31] = old31;
1194                force_sig(status);
1195        }
1196
1197out:
1198        exception_exit(prev_state);
1199}
1200
1201/*
1202 * No lock; only written during early bootup by CPU 0.
1203 */
1204static RAW_NOTIFIER_HEAD(cu2_chain);
1205
1206int __ref register_cu2_notifier(struct notifier_block *nb)
1207{
1208        return raw_notifier_chain_register(&cu2_chain, nb);
1209}
1210
1211int cu2_notifier_call_chain(unsigned long val, void *v)
1212{
1213        return raw_notifier_call_chain(&cu2_chain, val, v);
1214}
1215
1216static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1217        void *data)
1218{
1219        struct pt_regs *regs = data;
1220
1221        die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1222                              "instruction", regs);
1223        force_sig(SIGILL);
1224
1225        return NOTIFY_OK;
1226}
1227
1228#ifdef CONFIG_MIPS_FP_SUPPORT
1229
1230static int enable_restore_fp_context(int msa)
1231{
1232        int err, was_fpu_owner, prior_msa;
1233        bool first_fp;
1234
1235        /* Initialize context if it hasn't been used already */
1236        first_fp = init_fp_ctx(current);
1237
1238        if (first_fp) {
1239                preempt_disable();
1240                err = own_fpu_inatomic(1);
1241                if (msa && !err) {
1242                        enable_msa();
1243                        set_thread_flag(TIF_USEDMSA);
1244                        set_thread_flag(TIF_MSA_CTX_LIVE);
1245                }
1246                preempt_enable();
1247                return err;
1248        }
1249
1250        /*
1251         * This task has formerly used the FP context.
1252         *
1253         * If this thread has no live MSA vector context then we can simply
1254         * restore the scalar FP context. If it has live MSA vector context
1255         * (that is, it has or may have used MSA since last performing a
1256         * function call) then we'll need to restore the vector context. This
1257         * applies even if we're currently only executing a scalar FP
1258         * instruction. This is because if we were to later execute an MSA
1259         * instruction then we'd either have to:
1260         *
1261         *  - Restore the vector context & clobber any registers modified by
1262         *    scalar FP instructions between now & then.
1263         *
1264         * or
1265         *
1266         *  - Not restore the vector context & lose the most significant bits
1267         *    of all vector registers.
1268         *
1269         * Neither of those options is acceptable. We cannot restore the least
1270         * significant bits of the registers now & only restore the most
1271         * significant bits later because the most significant bits of any
1272         * vector registers whose aliased FP register is modified now will have
1273         * been zeroed. We'd have no way to know that when restoring the vector
1274         * context & thus may load an outdated value for the most significant
1275         * bits of a vector register.
1276         */
1277        if (!msa && !thread_msa_context_live())
1278                return own_fpu(1);
1279
1280        /*
1281         * This task is using or has previously used MSA. Thus we require
1282         * that Status.FR == 1.
1283         */
1284        preempt_disable();
1285        was_fpu_owner = is_fpu_owner();
1286        err = own_fpu_inatomic(0);
1287        if (err)
1288                goto out;
1289
1290        enable_msa();
1291        write_msa_csr(current->thread.fpu.msacsr);
1292        set_thread_flag(TIF_USEDMSA);
1293
1294        /*
1295         * If this is the first time that the task is using MSA and it has
 1296         * previously used scalar FP in this time slice then we already have
1297         * FP context which we shouldn't clobber. We do however need to clear
1298         * the upper 64b of each vector register so that this task has no
1299         * opportunity to see data left behind by another.
1300         */
1301        prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1302        if (!prior_msa && was_fpu_owner) {
1303                init_msa_upper();
1304
1305                goto out;
1306        }
1307
1308        if (!prior_msa) {
1309                /*
1310                 * Restore the least significant 64b of each vector register
1311                 * from the existing scalar FP context.
1312                 */
1313                _restore_fp(current);
1314
1315                /*
1316                 * The task has not formerly used MSA, so clear the upper 64b
1317                 * of each vector register such that it cannot see data left
1318                 * behind by another task.
1319                 */
1320                init_msa_upper();
1321        } else {
1322                /* We need to restore the vector context. */
1323                restore_msa(current);
1324
1325                /* Restore the scalar FP control & status register */
1326                if (!was_fpu_owner)
1327                        write_32bit_cp1_register(CP1_STATUS,
1328                                                 current->thread.fpu.fcr31);
1329        }
1330
1331out:
1332        preempt_enable();
1333
1334        return 0;
1335}
1336
1337#else /* !CONFIG_MIPS_FP_SUPPORT */
1338
1339static int enable_restore_fp_context(int msa)
1340{
1341        return SIGILL;
1342}
1343
1344#endif /* CONFIG_MIPS_FP_SUPPORT */
1345
1346asmlinkage void do_cpu(struct pt_regs *regs)
1347{
1348        enum ctx_state prev_state;
1349        unsigned int __user *epc;
1350        unsigned long old_epc, old31;
1351        unsigned int opcode;
1352        unsigned int cpid;
1353        int status;
1354
1355        prev_state = exception_enter();
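        /* CP0.Cause.CE tells us which coprocessor (0-3) was unusable */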
1356        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1357
1358        if (cpid != 2)
1359                die_if_kernel("do_cpu invoked from kernel context!", regs);
1360
1361        switch (cpid) {
1362        case 0:
1363                epc = (unsigned int __user *)exception_epc(regs);
1364                old_epc = regs->cp0_epc;
1365                old31 = regs->regs[31];
1366                opcode = 0;
1367                status = -1;
1368
1369                if (unlikely(compute_return_epc(regs) < 0))
1370                        break;
1371
1372                if (!get_isa16_mode(regs->cp0_epc)) {
1373                        if (unlikely(get_user(opcode, epc) < 0))
1374                                status = SIGSEGV;
1375
1376                        if (!cpu_has_llsc && status < 0)
1377                                status = simulate_llsc(regs, opcode);
1378                }
1379
1380                if (status < 0)
1381                        status = SIGILL;
1382
1383                if (unlikely(status > 0)) {
1384                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
1385                        regs->regs[31] = old31;
1386                        force_sig(status);
1387                }
1388
1389                break;
1390
1391#ifdef CONFIG_MIPS_FP_SUPPORT
1392        case 3:
1393                /*
1394                 * The COP3 opcode space and consequently the CP0.Status.CU3
1395                 * bit and the CP0.Cause.CE=3 encoding have been removed as
1396                 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1397                 * up the space has been reused for COP1X instructions, that
1398                 * are enabled by the CP0.Status.CU1 bit and consequently
1399                 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1400                 * exceptions.  Some FPU-less processors that implement one
1401                 * of these ISAs however use this code erroneously for COP1X
1402                 * instructions.  Therefore we redirect this trap to the FP
1403                 * emulator too.
1404                 */
1405                if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1406                        force_sig(SIGILL);
1407                        break;
1408                }
1409                /* Fall through.  */
1410
1411        case 1: {
1412                void __user *fault_addr;
1413                unsigned long fcr31;
1414                int err, sig;
1415
1416                err = enable_restore_fp_context(0);
1417
1418                if (raw_cpu_has_fpu && !err)
1419                        break;
1420
1421                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1422                                               &fault_addr);
1423
1424                /*
1425                 * We can't allow the emulated instruction to leave
1426                 * any enabled Cause bits set in $fcr31.
1427                 */
1428                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1429                current->thread.fpu.fcr31 &= ~fcr31;
1430
1431                /* Send a signal if required.  */
1432                if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1433                        mt_ase_fp_affinity();
1434
1435                break;
1436        }
1437#else /* CONFIG_MIPS_FP_SUPPORT */
1438        case 1:
1439        case 3:
1440                force_sig(SIGILL);
1441                break;
1442#endif /* CONFIG_MIPS_FP_SUPPORT */
1443
1444        case 2:
1445                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1446                break;
1447        }
1448
1449        exception_exit(prev_state);
1450}
1451
1452asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1453{
1454        enum ctx_state prev_state;
1455
1456        prev_state = exception_enter();
1457        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1458        if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1459                       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1460                goto out;
1461
1462        /* Clear MSACSR.Cause before enabling interrupts */
1463        write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1464        local_irq_enable();
1465
1466        die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1467        force_sig(SIGFPE);
1468out:
1469        exception_exit(prev_state);
1470}
1471
1472asmlinkage void do_msa(struct pt_regs *regs)
1473{
1474        enum ctx_state prev_state;
1475        int err;
1476
1477        prev_state = exception_enter();
1478
1479        if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1480                force_sig(SIGILL);
1481                goto out;
1482        }
1483
1484        die_if_kernel("do_msa invoked from kernel context!", regs);
1485
1486        err = enable_restore_fp_context(1);
1487        if (err)
1488                force_sig(SIGILL);
1489out:
1490        exception_exit(prev_state);
1491}
1492
1493asmlinkage void do_mdmx(struct pt_regs *regs)
1494{
1495        enum ctx_state prev_state;
1496
1497        prev_state = exception_enter();
1498        force_sig(SIGILL);
1499        exception_exit(prev_state);
1500}
1501
1502/*
1503 * Called with interrupts disabled.
1504 */
1505asmlinkage void do_watch(struct pt_regs *regs)
1506{
1507        enum ctx_state prev_state;
1508
1509        prev_state = exception_enter();
1510        /*
 1511         * Clear the WP bit (bit 22) of the cause register so we don't
 1512         * loop forever.
1513         */
1514        clear_c0_cause(CAUSEF_WP);
1515
1516        /*
1517         * If the current thread has the watch registers loaded, save
1518         * their values and send SIGTRAP.  Otherwise another thread
1519         * left the registers set, clear them and continue.
1520         */
1521        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1522                mips_read_watch_registers();
1523                local_irq_enable();
1524                force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
1525        } else {
1526                mips_clear_watch_registers();
1527                local_irq_enable();
1528        }
1529        exception_exit(prev_state);
1530}
1531
1532asmlinkage void do_mcheck(struct pt_regs *regs)
1533{
1534        int multi_match = regs->cp0_status & ST0_TS;
1535        enum ctx_state prev_state;
1536        mm_segment_t old_fs = get_fs();
1537
1538        prev_state = exception_enter();
1539        show_regs(regs);
1540
1541        if (multi_match) {
1542                dump_tlb_regs();
1543                pr_info("\n");
1544                dump_tlb_all();
1545        }
1546
1547        if (!user_mode(regs))
1548                set_fs(KERNEL_DS);
1549
1550        show_code((unsigned int __user *) regs->cp0_epc);
1551
1552        set_fs(old_fs);
1553
1554        /*
1555         * Some chips may have other causes of machine check (e.g. SB1
1556         * graduation timer)
1557         */
1558        panic("Caught Machine Check exception - %scaused by multiple "
1559              "matching entries in the TLB.",
1560              (multi_match) ? "" : "not ");
1561}
1562
1563asmlinkage void do_mt(struct pt_regs *regs)
1564{
1565        int subcode;
1566
1567        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1568                        >> VPECONTROL_EXCPT_SHIFT;
1569        switch (subcode) {
1570        case 0:
1571                printk(KERN_DEBUG "Thread Underflow\n");
1572                break;
1573        case 1:
1574                printk(KERN_DEBUG "Thread Overflow\n");
1575                break;
1576        case 2:
1577                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1578                break;
1579        case 3:
1580                printk(KERN_DEBUG "Gating Storage Exception\n");
1581                break;
1582        case 4:
1583                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1584                break;
1585        case 5:
1586                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1587                break;
1588        default:
1589                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1590                        subcode);
1591                break;
1592        }
1593        die_if_kernel("MIPS MT Thread exception in kernel", regs);
1594
1595        force_sig(SIGILL);
1596}
1597
1598
1599asmlinkage void do_dsp(struct pt_regs *regs)
1600{
1601        if (cpu_has_dsp)
1602                panic("Unexpected DSP exception");
1603
1604        force_sig(SIGILL);
1605}
1606
1607asmlinkage void do_reserved(struct pt_regs *regs)
1608{
1609        /*
1610         * Game over - no way to handle this if it ever occurs.  Most likely
1611         * caused by a new, unknown CPU type or by a preceding fatal
1612         * hardware/software error.
1613         */
1614        show_regs(regs);
1615        panic("Caught reserved exception %ld - should not happen.",
1616              (regs->cp0_cause & 0x7f) >> 2);
1617}
1618
1619static int __initdata l1parity = 1;
1620static int __init nol1parity(char *s)
1621{
1622        l1parity = 0;
1623        return 1;
1624}
1625__setup("nol1par", nol1parity);
1626static int __initdata l2parity = 1;
1627static int __init nol2parity(char *s)
1628{
1629        l2parity = 0;
1630        return 1;
1631}
1632__setup("nol2par", nol2parity);
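    /*
     * Illustrative: cache parity / ECC checking can be disabled from the
     * kernel command line with
     *
     *     nol1par nol2par
     *
     * On CM3 systems parity_protection_init() below forces the L1 and L2
     * settings to match, so disabling either one effectively disables both.
     */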
1633
1634/*
1635 * Some MIPS CPUs can enable/disable cache parity detection, but they
1636 * do it in different ways.
1637 */
1638static inline void parity_protection_init(void)
1639{
1640#define ERRCTL_PE       0x80000000
1641#define ERRCTL_L2P      0x00800000
1642
1643        if (mips_cm_revision() >= CM_REV_CM3) {
1644                ulong gcr_ectl, cp0_ectl;
1645
1646                /*
1647                 * With CM3 systems we need to ensure that the L1 & L2
1648                 * parity enables are set to the same value, since this
1649                 * is presumed by the hardware engineers.
1650                 *
1651                 * If the user disabled either of L1 or L2 ECC checking,
1652                 * disable both.
1653                 */
1654                l1parity &= l2parity;
1655                l2parity &= l1parity;
1656
1657                /* Probe L1 ECC support */
1658                cp0_ectl = read_c0_ecc();
1659                write_c0_ecc(cp0_ectl | ERRCTL_PE);
1660                back_to_back_c0_hazard();
1661                cp0_ectl = read_c0_ecc();
1662
1663                /* Probe L2 ECC support */
1664                gcr_ectl = read_gcr_err_control();
1665
1666                if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1667                    !(cp0_ectl & ERRCTL_PE)) {
1668                        /*
1669                         * One of L1 or L2 ECC checking isn't supported,
1670                         * so we cannot enable either.
1671                         */
1672                        l1parity = l2parity = 0;
1673                }
1674
1675                /* Configure L1 ECC checking */
1676                if (l1parity)
1677                        cp0_ectl |= ERRCTL_PE;
1678                else
1679                        cp0_ectl &= ~ERRCTL_PE;
1680                write_c0_ecc(cp0_ectl);
1681                back_to_back_c0_hazard();
1682                WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1683
1684                /* Configure L2 ECC checking */
1685                if (l2parity)
1686                        gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1687                else
1688                        gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1689                write_gcr_err_control(gcr_ectl);
1690                gcr_ectl = read_gcr_err_control();
1691                gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1692                WARN_ON(!!gcr_ectl != l2parity);
1693
1694                pr_info("Cache parity protection %sabled\n",
1695                        l1parity ? "en" : "dis");
1696                return;
1697        }
1698
1699        switch (current_cpu_type()) {
1700        case CPU_24K:
1701        case CPU_34K:
1702        case CPU_74K:
1703        case CPU_1004K:
1704        case CPU_1074K:
1705        case CPU_INTERAPTIV:
1706        case CPU_PROAPTIV:
1707        case CPU_P5600:
1708        case CPU_QEMU_GENERIC:
1709        case CPU_P6600:
1710                {
1711                        unsigned long errctl;
1712                        unsigned int l1parity_present, l2parity_present;
1713
1714                        errctl = read_c0_ecc();
1715                        errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1716
1717                        /* probe L1 parity support */
1718                        write_c0_ecc(errctl | ERRCTL_PE);
1719                        back_to_back_c0_hazard();
1720                        l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1721
1722                        /* probe L2 parity support */
1723                        write_c0_ecc(errctl|ERRCTL_L2P);
1724                        back_to_back_c0_hazard();
1725                        l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1726
1727                        if (l1parity_present && l2parity_present) {
1728                                if (l1parity)
1729                                        errctl |= ERRCTL_PE;
1730                                if (l1parity ^ l2parity)
1731                                        errctl |= ERRCTL_L2P;
1732                        } else if (l1parity_present) {
1733                                if (l1parity)
1734                                        errctl |= ERRCTL_PE;
1735                        } else if (l2parity_present) {
1736                                if (l2parity)
1737                                        errctl |= ERRCTL_L2P;
1738                        } else {
1739                                /* No parity available */
1740                        }
1741
1742                        printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1743
1744                        write_c0_ecc(errctl);
1745                        back_to_back_c0_hazard();
1746                        errctl = read_c0_ecc();
1747                        printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1748
1749                        if (l1parity_present)
1750                                printk(KERN_INFO "Cache parity protection %sabled\n",
1751                                       (errctl & ERRCTL_PE) ? "en" : "dis");
1752
1753                        if (l2parity_present) {
1754                                if (l1parity_present && l1parity)
1755                                        errctl ^= ERRCTL_L2P;
1756                                printk(KERN_INFO "L2 cache parity protection %sabled\n",
1757                                       (errctl & ERRCTL_L2P) ? "en" : "dis");
1758                        }
1759                }
1760                break;
1761
1762        case CPU_5KC:
1763        case CPU_5KE:
1764        case CPU_LOONGSON1:
1765                /* Set the PE bit (bit 31) in the c0_errctl register. */
1766                write_c0_ecc(0x80000000);
1767                back_to_back_c0_hazard();
1768                printk(KERN_INFO "Cache parity protection %sabled\n",
1769                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1770                break;
1771        case CPU_20KC:
1772        case CPU_25KF:
1773                /* Clear the DE bit (bit 16) in the c0_status register. */
1774                printk(KERN_INFO "Enable cache parity protection for "
1775                       "MIPS 20KC/25KF CPUs.\n");
1776                clear_c0_status(ST0_DE);
1777                break;
1778        default:
1779                break;
1780        }
1781}
1782
1783asmlinkage void cache_parity_error(void)
1784{
1785        const int field = 2 * sizeof(unsigned long);
1786        unsigned int reg_val;
1787
1788        /* For the moment, report the problem and hang. */
1789        printk("Cache error exception:\n");
1790        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1791        reg_val = read_c0_cacheerr();
1792        printk("c0_cacheerr == %08x\n", reg_val);
1793
1794        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1795               reg_val & (1<<30) ? "secondary" : "primary",
1796               reg_val & (1<<31) ? "data" : "insn");
1797        if ((cpu_has_mips_r2_r6) &&
1798            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1799                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1800                        reg_val & (1<<29) ? "ED " : "",
1801                        reg_val & (1<<28) ? "ET " : "",
1802                        reg_val & (1<<27) ? "ES " : "",
1803                        reg_val & (1<<26) ? "EE " : "",
1804                        reg_val & (1<<25) ? "EB " : "",
1805                        reg_val & (1<<24) ? "EI " : "",
1806                        reg_val & (1<<23) ? "E1 " : "",
1807                        reg_val & (1<<22) ? "E0 " : "");
1808        } else {
1809                pr_err("Error bits: %s%s%s%s%s%s%s\n",
1810                        reg_val & (1<<29) ? "ED " : "",
1811                        reg_val & (1<<28) ? "ET " : "",
1812                        reg_val & (1<<26) ? "EE " : "",
1813                        reg_val & (1<<25) ? "EB " : "",
1814                        reg_val & (1<<24) ? "EI " : "",
1815                        reg_val & (1<<23) ? "E1 " : "",
1816                        reg_val & (1<<22) ? "E0 " : "");
1817        }
1818        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1819
1820#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1821        if (reg_val & (1<<22))
1822                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1823
1824        if (reg_val & (1<<23))
1825                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1826#endif
1827
1828        panic("Can't handle the cache error!");
1829}
1830
1831asmlinkage void do_ftlb(void)
1832{
1833        const int field = 2 * sizeof(unsigned long);
1834        unsigned int reg_val;
1835
1836        /* For the moment, report the problem and hang. */
1837        if ((cpu_has_mips_r2_r6) &&
1838            (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1839            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1840                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1841                       read_c0_ecc());
1842                pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1843                reg_val = read_c0_cacheerr();
1844                pr_err("c0_cacheerr == %08x\n", reg_val);
1845
1846                if ((reg_val & 0xc0000000) == 0xc0000000) {
1847                        pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1848                } else {
1849                        pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1850                               reg_val & (1<<30) ? "secondary" : "primary",
1851                               reg_val & (1<<31) ? "data" : "insn");
1852                }
1853        } else {
1854                pr_err("FTLB error exception\n");
1855        }
1856        /* Just print the cacheerr bits for now */
1857        cache_parity_error();
1858}
1859
1860/*
1861 * SDBBP EJTAG debug exception handler.
1862 * We skip the SDBBP instruction and continue at the next one.
1863 */
1864void ejtag_exception_handler(struct pt_regs *regs)
1865{
1866        const int field = 2 * sizeof(unsigned long);
1867        unsigned long depc, old_epc, old_ra;
1868        unsigned int debug;
1869
1870        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1871        depc = read_c0_depc();
1872        debug = read_c0_debug();
1873        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1874        if (debug & 0x80000000) {
1875                /*
1876                 * In branch delay slot.
1877                 * We cheat a little bit here and use EPC to calculate the
1878                 * debug return address (DEPC). EPC is restored after the
1879                 * calculation.
1880                 */
1881                old_epc = regs->cp0_epc;
1882                old_ra = regs->regs[31];
1883                regs->cp0_epc = depc;
1884                compute_return_epc(regs);
1885                depc = regs->cp0_epc;
1886                regs->cp0_epc = old_epc;
1887                regs->regs[31] = old_ra;
1888        } else
1889                depc += 4;
1890        write_c0_depc(depc);
1891
1892#if 0
1893        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1894        write_c0_debug(debug | 0x100);
1895#endif
1896}
1897
1898/*
1899 * NMI exception handler.
1900 * No lock; only written during early bootup by CPU 0.
1901 */
1902static RAW_NOTIFIER_HEAD(nmi_chain);
1903
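    /*
     * Register a callback to be invoked from nmi_exception_handler() below
     * before the system dies.  Illustrative use (my_nmi_cb and my_nmi_nb are
     * made-up names); the callback runs in NMI context and receives the
     * interrupted pt_regs as its data argument:
     *
     *     static int my_nmi_cb(struct notifier_block *nb, unsigned long action,
     *                          void *data)
     *     {
     *             return NOTIFY_OK;
     *     }
     *
     *     static struct notifier_block my_nmi_nb = {
     *             .notifier_call = my_nmi_cb,
     *     };
     *
     *     register_nmi_notifier(&my_nmi_nb);
     */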
1904int register_nmi_notifier(struct notifier_block *nb)
1905{
1906        return raw_notifier_chain_register(&nmi_chain, nb);
1907}
1908
1909void __noreturn nmi_exception_handler(struct pt_regs *regs)
1910{
1911        char str[100];
1912
1913        nmi_enter();
1914        raw_notifier_call_chain(&nmi_chain, 0, regs);
1915        bust_spinlocks(1);
1916        snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1917                 smp_processor_id(), regs->cp0_epc);
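            /*
             * An NMI is taken via the reset vector with the interrupted PC in
             * ErrorEPC rather than EPC, so substitute it before die() dumps
             * the registers.
             */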
1918        regs->cp0_epc = read_c0_errorepc();
1919        die(str, regs);
1920        nmi_exit();
1921}
1922
1923#define VECTORSPACING 0x100     /* for EI/VI mode */
1924
1925unsigned long ebase;
1926EXPORT_SYMBOL_GPL(ebase);
1927unsigned long exception_handlers[32];
1928unsigned long vi_handlers[64];
1929
1930void __init *set_except_vector(int n, void *addr)
1931{
1932        unsigned long handler = (unsigned long) addr;
1933        unsigned long old_handler;
1934
1935#ifdef CONFIG_CPU_MICROMIPS
1936        /*
1937         * Only the TLB handlers are cache aligned with an even
1938         * address. All other handlers are on an odd address and
1939         * require no modification. Otherwise, MIPS32 mode will
1940         * be entered when handling any TLB exceptions. That
1941         * would be bad...since we must stay in microMIPS mode.
1942         */
1943        if (!(handler & 0x1))
1944                handler |= 1;
1945#endif
1946        old_handler = xchg(&exception_handlers[n], handler);
1947
1948        if (n == 0 && cpu_has_divec) {
1949#ifdef CONFIG_CPU_MICROMIPS
1950                unsigned long jump_mask = ~((1 << 27) - 1);
1951#else
1952                unsigned long jump_mask = ~((1 << 28) - 1);
1953#endif
1954                u32 *buf = (u32 *)(ebase + 0x200);
1955                unsigned int k0 = 26;
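                    /*
                     * A plain "j" can only reach a target within the same
                     * 256MB (128MB for microMIPS) region as the dispatch code
                     * at ebase + 0x200, hence the jump_mask check; otherwise
                     * load the handler address into $k0 and use "jr".
                     */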
1956                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
1957                        uasm_i_j(&buf, handler & ~jump_mask);
1958                        uasm_i_nop(&buf);
1959                } else {
1960                        UASM_i_LA(&buf, k0, handler);
1961                        uasm_i_jr(&buf, k0);
1962                        uasm_i_nop(&buf);
1963                }
1964                local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
1965        }
1966        return (void *)old_handler;
1967}
1968
1969static void do_default_vi(void)
1970{
1971        show_regs(get_irq_regs());
1972        panic("Caught unexpected vectored interrupt.");
1973}
1974
1975static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1976{
1977        unsigned long handler;
1978        unsigned long old_handler = vi_handlers[n];
1979        int srssets = current_cpu_data.srsets;
1980        u16 *h;
1981        unsigned char *b;
1982
1983        BUG_ON(!cpu_has_veic && !cpu_has_vint);
1984
1985        if (addr == NULL) {
1986                handler = (unsigned long) do_default_vi;
1987                srs = 0;
1988        } else
1989                handler = (unsigned long) addr;
1990        vi_handlers[n] = handler;
1991
1992        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1993
1994        if (srs >= srssets)
1995                panic("Shadow register set %d not supported", srs);
1996
1997        if (cpu_has_veic) {
1998                if (board_bind_eic_interrupt)
1999                        board_bind_eic_interrupt(n, srs);
2000        } else if (cpu_has_vint) {
2001                /* SRSMap is only defined if shadow sets are implemented */
2002                if (srssets > 1)
2003                        change_c0_srsmap(0xf << n*4, srs << n*4);
2004        }
2005
2006        if (srs == 0) {
2007                /*
2008                 * If no shadow set is selected then use the default handler
2009                 * that does normal register saving and standard interrupt exit
2010                 */
2011                extern char except_vec_vi, except_vec_vi_lui;
2012                extern char except_vec_vi_ori, except_vec_vi_end;
2013                extern char rollback_except_vec_vi;
2014                char *vec_start = using_rollback_handler() ?
2015                        &rollback_except_vec_vi : &except_vec_vi;
2016#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2017                const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2018                const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2019#else
2020                const int lui_offset = &except_vec_vi_lui - vec_start;
2021                const int ori_offset = &except_vec_vi_ori - vec_start;
2022#endif
2023                const int handler_len = &except_vec_vi_end - vec_start;
2024
2025                if (handler_len > VECTORSPACING) {
2026                        /*
2027                         * Sigh... panicking won't help as the console
2028                         * is probably not configured :(
2029                         */
2030                        panic("VECTORSPACING too small");
2031                }
2032
2033                set_handler(((unsigned long)b - ebase), vec_start,
2034#ifdef CONFIG_CPU_MICROMIPS
2035                                (handler_len - 1));
2036#else
2037                                handler_len);
2038#endif
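                    /*
                     * Patch the handler address into the immediates of the
                     * lui/ori pair in the copied stub.  The 16-bit immediate
                     * is the second halfword of the instruction on big-endian
                     * and microMIPS builds, hence the +2 in the offsets above.
                     */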
2039                h = (u16 *)(b + lui_offset);
2040                *h = (handler >> 16) & 0xffff;
2041                h = (u16 *)(b + ori_offset);
2042                *h = (handler & 0xffff);
2043                local_flush_icache_range((unsigned long)b,
2044                                         (unsigned long)(b+handler_len));
2045        }
2046        else {
2047                /*
2048                 * In other cases jump directly to the interrupt handler. It
2049                 * is the handler's responsibility to save registers if required
2050                 * (e.g. hi/lo) and return from the exception using "eret".
2051                 */
2052                u32 insn;
2053
2054                h = (u16 *)b;
2055                /* j handler */
2056#ifdef CONFIG_CPU_MICROMIPS
2057                insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2058#else
2059                insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2060#endif
2061                h[0] = (insn >> 16) & 0xffff;
2062                h[1] = insn & 0xffff;
2063                h[2] = 0;
2064                h[3] = 0;
2065                local_flush_icache_range((unsigned long)b,
2066                                         (unsigned long)(b+8));
2067        }
2068
2069        return (void *)old_handler;
2070}
2071
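    /*
     * Install a vectored interrupt handler using the default shadow register
     * set.  Illustrative use (the handler name is made up):
     *
     *     if (cpu_has_vint)
     *             set_vi_handler(2, my_soc_irq_dispatch);
     */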
2072void *set_vi_handler(int n, vi_handler_t addr)
2073{
2074        return set_vi_srs_handler(n, addr, 0);
2075}
2076
2077extern void tlb_init(void);
2078
2079/*
2080 * Timer interrupt
2081 */
2082int cp0_compare_irq;
2083EXPORT_SYMBOL_GPL(cp0_compare_irq);
2084int cp0_compare_irq_shift;
2085
2086/*
2087 * Performance counter IRQ or -1 if shared with timer
2088 */
2089int cp0_perfcount_irq;
2090EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2091
2092/*
2093 * Fast debug channel IRQ or -1 if not present
2094 */
2095int cp0_fdc_irq;
2096EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2097
2098static int noulri;
2099
2100static int __init ulri_disable(char *s)
2101{
2102        pr_info("Disabling ulri\n");
2103        noulri = 1;
2104
2105        return 1;
2106}
2107__setup("noulri", ulri_disable);
2108
2109/* configure STATUS register */
2110static void configure_status(void)
2111{
2112        /*
2113         * Disable coprocessors and select 32-bit or 64-bit addressing
2114         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2115         * flag that some firmware may have left set and the TS bit (for
2116         * IP27).  Set XX for ISA IV code to work.
2117         */
2118        unsigned int status_set = ST0_CU0;
2119#ifdef CONFIG_64BIT
2120        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2121#endif
2122        if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2123                status_set |= ST0_XX;
2124        if (cpu_has_dsp)
2125                status_set |= ST0_MX;
2126
2127        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2128                         status_set);
2129}
2130
2131unsigned int hwrena;
2132EXPORT_SYMBOL_GPL(hwrena);
2133
2134/* configure HWRENA register */
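    /*
     * HWREna selects which hardware registers user space may read directly
     * with the RDHWR instruction (CPU number, cycle counter and its
     * resolution, ULR/TLS pointer, ...); RDHWR of anything not enabled here
     * traps as a Reserved Instruction and is emulated where possible.
     */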
2135static void configure_hwrena(void)
2136{
2137        hwrena = cpu_hwrena_impl_bits;
2138
2139        if (cpu_has_mips_r2_r6)
2140                hwrena |= MIPS_HWRENA_CPUNUM |
2141                          MIPS_HWRENA_SYNCISTEP |
2142                          MIPS_HWRENA_CC |
2143                          MIPS_HWRENA_CCRES;
2144
2145        if (!noulri && cpu_has_userlocal)
2146                hwrena |= MIPS_HWRENA_ULR;
2147
2148        if (hwrena)
2149                write_c0_hwrena(hwrena);
2150}
2151
2152static void configure_exception_vector(void)
2153{
2154        if (cpu_has_mips_r2_r6) {
2155                unsigned long sr = set_c0_status(ST0_BEV);
2156                /* If available, use WG to set top bits of EBASE */
2157                if (cpu_has_ebase_wg) {
2158#ifdef CONFIG_64BIT
2159                        write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2160#else
2161                        write_c0_ebase(ebase | MIPS_EBASE_WG);
2162#endif
2163                }
2164                write_c0_ebase(ebase);
2165                write_c0_status(sr);
2166        }
2167        if (cpu_has_veic || cpu_has_vint) {
2168                /* Setting vector spacing enables EI/VI mode  */
2169                change_c0_intctl(0x3e0, VECTORSPACING);
2170        }
2171        if (cpu_has_divec) {
2172                if (cpu_has_mipsmt) {
2173                        unsigned int vpflags = dvpe();
2174                        set_c0_cause(CAUSEF_IV);
2175                        evpe(vpflags);
2176                } else
2177                        set_c0_cause(CAUSEF_IV);
2178        }
2179}
2180
2181void per_cpu_trap_init(bool is_boot_cpu)
2182{
2183        unsigned int cpu = smp_processor_id();
2184
2185        configure_status();
2186        configure_hwrena();
2187
2188        configure_exception_vector();
2189
2190        /*
2191         * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2192         *
2193         *  o read IntCtl.IPTI to determine the timer interrupt
2194         *  o read IntCtl.IPPCI to determine the performance counter interrupt
2195         *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2196         */
2197        if (cpu_has_mips_r2_r6) {
2198                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2199                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2200                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2201                cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2202                if (!cp0_fdc_irq)
2203                        cp0_fdc_irq = -1;
2204
2205        } else {
2206                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2207                cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2208                cp0_perfcount_irq = -1;
2209                cp0_fdc_irq = -1;
2210        }
2211
2212        if (cpu_has_mmid)
2213                cpu_data[cpu].asid_cache = 0;
2214        else if (!cpu_data[cpu].asid_cache)
2215                cpu_data[cpu].asid_cache = asid_first_version(cpu);
2216
2217        mmgrab(&init_mm);
2218        current->active_mm = &init_mm;
2219        BUG_ON(current->mm);
2220        enter_lazy_tlb(&init_mm, current);
2221
2222        /* Boot CPU's cache setup in setup_arch(). */
2223        if (!is_boot_cpu)
2224                cpu_cache_init();
2225        tlb_init();
2226        TLBMISS_HANDLER_SETUP();
2227}
2228
2229/* Install CPU exception handler */
2230void set_handler(unsigned long offset, void *addr, unsigned long size)
2231{
2232#ifdef CONFIG_CPU_MICROMIPS
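            /*
             * microMIPS handler entry points carry the ISA mode bit (bit 0);
             * strip it to get the real start address of the code to copy.
             */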
2233        memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2234#else
2235        memcpy((void *)(ebase + offset), addr, size);
2236#endif
2237        local_flush_icache_range(ebase + offset, ebase + offset + size);
2238}
2239
2240static const char panic_null_cerr[] =
2241        "Trying to set NULL cache error exception handler\n";
2242
2243/*
2244 * Install uncached CPU exception handler.
2245 * This is suitable only for the cache error exception which is the only
2246 * exception handler that is being run uncached.
2247 */
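    /*
     * Illustrative use (the vector symbol is a placeholder): the cache error
     * exception vectors to the uncached alias of EBASE + 0x100, so cache
     * setup code installs its handler with something like
     *
     *     set_uncached_handler(0x100, &except_vec2_mychip, 0x80);
     */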
2248void set_uncached_handler(unsigned long offset, void *addr,
2249        unsigned long size)
2250{
2251        unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2252
2253        if (!addr)
2254                panic(panic_null_cerr);
2255
2256        memcpy((void *)(uncached_ebase + offset), addr, size);
2257}
2258
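    /*
     * "rdhwr_noopt" on the kernel command line makes trap_init() below
     * install the plain RI exception handler instead of the handle_ri_rdhwr*
     * variants that fast-path emulation of RDHWR (used e.g. for the TLS
     * pointer on CPUs without hardware support for it).
     */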
2259static int __initdata rdhwr_noopt;
2260static int __init set_rdhwr_noopt(char *str)
2261{
2262        rdhwr_noopt = 1;
2263        return 1;
2264}
2265
2266__setup("rdhwr_noopt", set_rdhwr_noopt);
2267
2268void __init trap_init(void)
2269{
2270        extern char except_vec3_generic;
2271        extern char except_vec4;
2272        extern char except_vec3_r4000;
2273        unsigned long i, vec_size;
2274        phys_addr_t ebase_pa;
2275
2276        check_wait();
2277
2278        if (!cpu_has_mips_r2_r6) {
2279                ebase = CAC_BASE;
2280                ebase_pa = virt_to_phys((void *)ebase);
2281                vec_size = 0x400;
2282
2283                memblock_reserve(ebase_pa, vec_size);
2284        } else {
2285                if (cpu_has_veic || cpu_has_vint)
2286                        vec_size = 0x200 + VECTORSPACING*64;
2287                else
2288                        vec_size = PAGE_SIZE;
2289
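                    /*
                     * EBASE must be at least 4KB aligned; align to the next
                     * power of two >= vec_size so the whole vector area
                     * (larger when EI/VI vector spacing is in use) fits in
                     * one naturally aligned region.
                     */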
2290                ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
2291                if (!ebase_pa)
2292                        panic("%s: Failed to allocate %lu bytes align=0x%x\n",
2293                              __func__, vec_size, 1 << fls(vec_size));
2294
2295                /*
2296                 * Try to ensure ebase resides in KSeg0 if possible.
2297                 *
2298                 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2299                 * hitting a poorly defined exception base for Cache Errors.
2300                 * The allocation is likely to be in the low 512MB of physical,
2301                 * in which case we should be able to convert to KSeg0.
2302                 *
2303                 * EVA is special though as it allows segments to be rearranged
2304                 * and to become uncached during cache error handling.
2305                 */
2306                if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2307                        ebase = CKSEG0ADDR(ebase_pa);
2308                else
2309                        ebase = (unsigned long)phys_to_virt(ebase_pa);
2310        }
2311
2312        if (cpu_has_mmips) {
2313                unsigned int config3 = read_c0_config3();
2314
2315                if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2316                        write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2317                else
2318                        write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2319        }
2320
2321        if (board_ebase_setup)
2322                board_ebase_setup();
2323        per_cpu_trap_init(true);
2324        memblock_set_bottom_up(false);
2325
2326        /*
2327         * Copy the generic exception handlers to their final destination.
2328         * This will be overridden later as suitable for a particular
2329         * configuration.
2330         */
2331        set_handler(0x180, &except_vec3_generic, 0x80);
2332
2333        /*
2334         * Setup default vectors
2335         */
2336        for (i = 0; i <= 31; i++)
2337                set_except_vector(i, handle_reserved);
2338
2339        /*
2340         * Copy the EJTAG debug exception vector handler code to its final
2341         * destination.
2342         */
2343        if (cpu_has_ejtag && board_ejtag_handler_setup)
2344                board_ejtag_handler_setup();
2345
2346        /*
2347         * Only some CPUs have the watch exceptions.
2348         */
2349        if (cpu_has_watch)
2350                set_except_vector(EXCCODE_WATCH, handle_watch);
2351
2352        /*
2353         * Initialise interrupt handlers
2354         */
2355        if (cpu_has_veic || cpu_has_vint) {
2356                int nvec = cpu_has_veic ? 64 : 8;
2357                for (i = 0; i < nvec; i++)
2358                        set_vi_handler(i, NULL);
2359        }
2360        else if (cpu_has_divec)
2361                set_handler(0x200, &except_vec4, 0x8);
2362
2363        /*
2364         * Some CPUs can enable/disable cache parity detection, but they
2365         * do it in different ways.
2366         */
2367        parity_protection_init();
2368
2369        /*
2370         * The Data Bus Errors / Instruction Bus Errors are signaled
2371         * by external hardware.  Therefore these two exceptions
2372         * may have board specific handlers.
2373         */
2374        if (board_be_init)
2375                board_be_init();
2376
2377        set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2378                                        rollback_handle_int : handle_int);
2379        set_except_vector(EXCCODE_MOD, handle_tlbm);
2380        set_except_vector(EXCCODE_TLBL, handle_tlbl);
2381        set_except_vector(EXCCODE_TLBS, handle_tlbs);
2382
2383        set_except_vector(EXCCODE_ADEL, handle_adel);
2384        set_except_vector(EXCCODE_ADES, handle_ades);
2385
2386        set_except_vector(EXCCODE_IBE, handle_ibe);
2387        set_except_vector(EXCCODE_DBE, handle_dbe);
2388
2389        set_except_vector(EXCCODE_SYS, handle_sys);
2390        set_except_vector(EXCCODE_BP, handle_bp);
2391
2392        if (rdhwr_noopt)
2393                set_except_vector(EXCCODE_RI, handle_ri);
2394        else {
2395                if (cpu_has_vtag_icache)
2396                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2397                else if (current_cpu_type() == CPU_LOONGSON3)
2398                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2399                else
2400                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2401        }
2402
2403        set_except_vector(EXCCODE_CPU, handle_cpu);
2404        set_except_vector(EXCCODE_OV, handle_ov);
2405        set_except_vector(EXCCODE_TR, handle_tr);
2406        set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2407
2408        if (board_nmi_handler_setup)
2409                board_nmi_handler_setup();
2410
2411        if (cpu_has_fpu && !cpu_has_nofpuex)
2412                set_except_vector(EXCCODE_FPE, handle_fpe);
2413
2414        set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2415
2416        if (cpu_has_rixiex) {
2417                set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2418                set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2419        }
2420
2421        set_except_vector(EXCCODE_MSADIS, handle_msa);
2422        set_except_vector(EXCCODE_MDMX, handle_mdmx);
2423
2424        if (cpu_has_mcheck)
2425                set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2426
2427        if (cpu_has_mipsmt)
2428                set_except_vector(EXCCODE_THREAD, handle_mt);
2429
2430        set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2431
2432        if (board_cache_error_setup)
2433                board_cache_error_setup();
2434
2435        if (cpu_has_vce)
2436                /* Special exception: R4[04]00 also uses the divec space. */
2437                set_handler(0x180, &except_vec3_r4000, 0x100);
2438        else if (cpu_has_4kex)
2439                set_handler(0x180, &except_vec3_generic, 0x80);
2440        else
2441                set_handler(0x080, &except_vec3_generic, 0x80);
2442
2443        local_flush_icache_range(ebase, ebase + vec_size);
2444
2445        sort_extable(__start___dbe_table, __stop___dbe_table);
2446
2447        cu2_notifier(default_cu2_call, 0x80000000);     /* Run last  */
2448}
2449
2450static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2451                            void *v)
2452{
2453        switch (cmd) {
2454        case CPU_PM_ENTER_FAILED:
2455        case CPU_PM_EXIT:
2456                configure_status();
2457                configure_hwrena();
2458                configure_exception_vector();
2459
2460                /* Restore register with CPU number for TLB handlers */
2461                TLBMISS_HANDLER_RESTORE();
2462
2463                break;
2464        }
2465
2466        return NOTIFY_OK;
2467}
2468
2469static struct notifier_block trap_pm_notifier_block = {
2470        .notifier_call = trap_pm_notifier,
2471};
2472
2473static int __init trap_pm_init(void)
2474{
2475        return cpu_pm_register_notifier(&trap_pm_notifier_block);
2476}
2477arch_initcall(trap_pm_init);
2478