linux/arch/mips/kernel/traps.c
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
   7 * Copyright (C) 1995, 1996 Paul M. Antoine
   8 * Copyright (C) 1998 Ulf Carlsson
   9 * Copyright (C) 1999 Silicon Graphics, Inc.
  10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
  11 * Copyright (C) 2002, 2003, 2004, 2005, 2007  Maciej W. Rozycki
  12 * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
  13 * Copyright (C) 2014, Imagination Technologies Ltd.
  14 */
  15#include <linux/bitops.h>
  16#include <linux/bug.h>
  17#include <linux/compiler.h>
  18#include <linux/context_tracking.h>
  19#include <linux/cpu_pm.h>
  20#include <linux/kexec.h>
  21#include <linux/init.h>
  22#include <linux/kernel.h>
  23#include <linux/module.h>
  24#include <linux/extable.h>
  25#include <linux/mm.h>
  26#include <linux/sched/mm.h>
  27#include <linux/sched/debug.h>
  28#include <linux/smp.h>
  29#include <linux/spinlock.h>
  30#include <linux/kallsyms.h>
  31#include <linux/memblock.h>
  32#include <linux/interrupt.h>
  33#include <linux/ptrace.h>
  34#include <linux/kgdb.h>
  35#include <linux/kdebug.h>
  36#include <linux/kprobes.h>
  37#include <linux/notifier.h>
  38#include <linux/kdb.h>
  39#include <linux/irq.h>
  40#include <linux/perf_event.h>
  41
  42#include <asm/addrspace.h>
  43#include <asm/bootinfo.h>
  44#include <asm/branch.h>
  45#include <asm/break.h>
  46#include <asm/cop2.h>
  47#include <asm/cpu.h>
  48#include <asm/cpu-type.h>
  49#include <asm/dsp.h>
  50#include <asm/fpu.h>
  51#include <asm/fpu_emulator.h>
  52#include <asm/idle.h>
  53#include <asm/isa-rev.h>
  54#include <asm/mips-cps.h>
  55#include <asm/mips-r2-to-r6-emul.h>
  56#include <asm/mipsregs.h>
  57#include <asm/mipsmtregs.h>
  58#include <asm/module.h>
  59#include <asm/msa.h>
  60#include <asm/ptrace.h>
  61#include <asm/sections.h>
  62#include <asm/siginfo.h>
  63#include <asm/tlbdebug.h>
  64#include <asm/traps.h>
  65#include <linux/uaccess.h>
  66#include <asm/watch.h>
  67#include <asm/mmu_context.h>
  68#include <asm/types.h>
  69#include <asm/stacktrace.h>
  70#include <asm/tlbex.h>
  71#include <asm/uasm.h>
  72
  73#include <asm/mach-loongson64/cpucfg-emul.h>
  74
  75#include "access-helper.h"
  76
  77extern void check_wait(void);
  78extern asmlinkage void rollback_handle_int(void);
  79extern asmlinkage void handle_int(void);
  80extern asmlinkage void handle_adel(void);
  81extern asmlinkage void handle_ades(void);
  82extern asmlinkage void handle_ibe(void);
  83extern asmlinkage void handle_dbe(void);
  84extern asmlinkage void handle_sys(void);
  85extern asmlinkage void handle_bp(void);
  86extern asmlinkage void handle_ri(void);
  87extern asmlinkage void handle_ri_rdhwr_tlbp(void);
  88extern asmlinkage void handle_ri_rdhwr(void);
  89extern asmlinkage void handle_cpu(void);
  90extern asmlinkage void handle_ov(void);
  91extern asmlinkage void handle_tr(void);
  92extern asmlinkage void handle_msa_fpe(void);
  93extern asmlinkage void handle_fpe(void);
  94extern asmlinkage void handle_ftlb(void);
  95extern asmlinkage void handle_gsexc(void);
  96extern asmlinkage void handle_msa(void);
  97extern asmlinkage void handle_mdmx(void);
  98extern asmlinkage void handle_watch(void);
  99extern asmlinkage void handle_mt(void);
 100extern asmlinkage void handle_dsp(void);
 101extern asmlinkage void handle_mcheck(void);
 102extern asmlinkage void handle_reserved(void);
 103extern void tlb_do_page_fault_0(void);
 104
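    /*
     * Platform hook pointers, filled in by board/platform setup code to
     * extend the generic handling: board_be_handler is consulted from
     * do_be() for bus errors, board_nmi_handler_setup and
     * board_ejtag_handler_setup install platform NMI/EJTAG handlers, and
     * board_cache_error_setup installs a cache error handler.
     */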
 105void (*board_be_init)(void);
 106int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 107void (*board_nmi_handler_setup)(void);
 108void (*board_ejtag_handler_setup)(void);
 109void (*board_bind_eic_interrupt)(int irq, int regset);
 110void (*board_ebase_setup)(void);
 111void (*board_cache_error_setup)(void);
 112
 113static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
 114                               bool user)
 115{
 116        unsigned long *sp = (unsigned long *)(reg29 & ~3);
 117        unsigned long addr;
 118
 119        printk("%sCall Trace:", loglvl);
 120#ifdef CONFIG_KALLSYMS
 121        printk("%s\n", loglvl);
 122#endif
 123        while (!kstack_end(sp)) {
 124                if (__get_addr(&addr, sp++, user)) {
 125                        printk("%s (Bad stack address)", loglvl);
 126                        break;
 127                }
 128                if (__kernel_text_address(addr))
 129                        print_ip_sym(loglvl, addr);
 130        }
 131        printk("%s\n", loglvl);
 132}
 133
 134#ifdef CONFIG_KALLSYMS
 135int raw_show_trace;
 136static int __init set_raw_show_trace(char *str)
 137{
 138        raw_show_trace = 1;
 139        return 1;
 140}
 141__setup("raw_show_trace", set_raw_show_trace);
 142#endif
 143
 144static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
 145                           const char *loglvl, bool user)
 146{
 147        unsigned long sp = regs->regs[29];
 148        unsigned long ra = regs->regs[31];
 149        unsigned long pc = regs->cp0_epc;
 150
 151        if (!task)
 152                task = current;
 153
 154        if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
 155                show_raw_backtrace(sp, loglvl, user);
 156                return;
 157        }
 158        printk("%sCall Trace:\n", loglvl);
 159        do {
 160                print_ip_sym(loglvl, pc);
 161                pc = unwind_stack(task, &sp, pc, &ra);
 162        } while (pc);
 163        pr_cont("\n");
 164}
 165
 166/*
 167 * This routine abuses get_user()/put_user() to reference pointers
 168 * with at least a bit of error checking ...
 169 */
 170static void show_stacktrace(struct task_struct *task,
 171        const struct pt_regs *regs, const char *loglvl, bool user)
 172{
 173        const int field = 2 * sizeof(unsigned long);
 174        unsigned long stackdata;
 175        int i;
 176        unsigned long *sp = (unsigned long *)regs->regs[29];
 177
 178        printk("%sStack :", loglvl);
 179        i = 0;
 180        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
 181                if (i && ((i % (64 / field)) == 0)) {
 182                        pr_cont("\n");
 183                        printk("%s       ", loglvl);
 184                }
 185                if (i > 39) {
 186                        pr_cont(" ...");
 187                        break;
 188                }
 189
 190                if (__get_addr(&stackdata, sp++, user)) {
 191                        pr_cont(" (Bad stack address)");
 192                        break;
 193                }
 194
 195                pr_cont(" %0*lx", field, stackdata);
 196                i++;
 197        }
 198        pr_cont("\n");
 199        show_backtrace(task, regs, loglvl, user);
 200}
 201
 202void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
 203{
 204        struct pt_regs regs;
 205
 206        regs.cp0_status = KSU_KERNEL;
 207        if (sp) {
 208                regs.regs[29] = (unsigned long)sp;
 209                regs.regs[31] = 0;
 210                regs.cp0_epc = 0;
 211        } else {
 212                if (task && task != current) {
 213                        regs.regs[29] = task->thread.reg29;
 214                        regs.regs[31] = 0;
 215                        regs.cp0_epc = task->thread.reg31;
 216                } else {
 217                        prepare_frametrace(&regs);
 218                }
 219        }
 220        show_stacktrace(task, &regs, loglvl, false);
 221}
 222
 223static void show_code(void *pc, bool user)
 224{
 225        long i;
 226        unsigned short *pc16 = NULL;
 227
 228        printk("Code:");
 229
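            /*
             * An odd PC means the ISA bit is set, i.e. the faulting code is
             * MIPS16e or microMIPS, so dump 16-bit halfwords rather than
             * 32-bit words.
             */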
 230        if ((unsigned long)pc & 1)
 231                pc16 = (u16 *)((unsigned long)pc & ~1);
 232
 233        for (i = -3; i < 6; i++) {
 234                if (pc16) {
 235                        u16 insn16;
 236
 237                        if (__get_inst16(&insn16, pc16 + i, user))
 238                                goto bad_address;
 239
 240                        pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
 241                } else {
 242                        u32 insn32;
 243
 244                        if (__get_inst32(&insn32, (u32 *)pc + i, user))
 245                                goto bad_address;
 246
 247                        pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
 248                }
 249        }
 250        pr_cont("\n");
 251        return;
 252
 253bad_address:
 254        pr_cont(" (Bad address in epc)\n\n");
 255}
 256
 257static void __show_regs(const struct pt_regs *regs)
 258{
 259        const int field = 2 * sizeof(unsigned long);
 260        unsigned int cause = regs->cp0_cause;
 261        unsigned int exccode;
 262        int i;
 263
 264        show_regs_print_info(KERN_DEFAULT);
 265
 266        /*
 267         * Saved main processor registers
 268         */
 269        for (i = 0; i < 32; ) {
 270                if ((i % 4) == 0)
 271                        printk("$%2d   :", i);
 272                if (i == 0)
 273                        pr_cont(" %0*lx", field, 0UL);
 274                else if (i == 26 || i == 27)
 275                        pr_cont(" %*s", field, "");
 276                else
 277                        pr_cont(" %0*lx", field, regs->regs[i]);
 278
 279                i++;
 280                if ((i % 4) == 0)
 281                        pr_cont("\n");
 282        }
 283
 284#ifdef CONFIG_CPU_HAS_SMARTMIPS
 285        printk("Acx    : %0*lx\n", field, regs->acx);
 286#endif
 287        if (MIPS_ISA_REV < 6) {
 288                printk("Hi    : %0*lx\n", field, regs->hi);
 289                printk("Lo    : %0*lx\n", field, regs->lo);
 290        }
 291
 292        /*
 293         * Saved cp0 registers
 294         */
 295        printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
 296               (void *) regs->cp0_epc);
 297        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
 298               (void *) regs->regs[31]);
 299
 300        printk("Status: %08x    ", (uint32_t) regs->cp0_status);
 301
 302        if (cpu_has_3kex) {
 303                if (regs->cp0_status & ST0_KUO)
 304                        pr_cont("KUo ");
 305                if (regs->cp0_status & ST0_IEO)
 306                        pr_cont("IEo ");
 307                if (regs->cp0_status & ST0_KUP)
 308                        pr_cont("KUp ");
 309                if (regs->cp0_status & ST0_IEP)
 310                        pr_cont("IEp ");
 311                if (regs->cp0_status & ST0_KUC)
 312                        pr_cont("KUc ");
 313                if (regs->cp0_status & ST0_IEC)
 314                        pr_cont("IEc ");
 315        } else if (cpu_has_4kex) {
 316                if (regs->cp0_status & ST0_KX)
 317                        pr_cont("KX ");
 318                if (regs->cp0_status & ST0_SX)
 319                        pr_cont("SX ");
 320                if (regs->cp0_status & ST0_UX)
 321                        pr_cont("UX ");
 322                switch (regs->cp0_status & ST0_KSU) {
 323                case KSU_USER:
 324                        pr_cont("USER ");
 325                        break;
 326                case KSU_SUPERVISOR:
 327                        pr_cont("SUPERVISOR ");
 328                        break;
 329                case KSU_KERNEL:
 330                        pr_cont("KERNEL ");
 331                        break;
 332                default:
 333                        pr_cont("BAD_MODE ");
 334                        break;
 335                }
 336                if (regs->cp0_status & ST0_ERL)
 337                        pr_cont("ERL ");
 338                if (regs->cp0_status & ST0_EXL)
 339                        pr_cont("EXL ");
 340                if (regs->cp0_status & ST0_IE)
 341                        pr_cont("IE ");
 342        }
 343        pr_cont("\n");
 344
 345        exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
 346        printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
 347
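            /*
             * ExcCodes 1-5 (TLB modified, TLB load/store, address error
             * load/store) are the exceptions that set BadVAddr.
             */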
 348        if (1 <= exccode && exccode <= 5)
 349                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
 350
 351        printk("PrId  : %08x (%s)\n", read_c0_prid(),
 352               cpu_name_string());
 353}
 354
 355/*
 356 * FIXME: really the generic show_regs should take a const pointer argument.
 357 */
 358void show_regs(struct pt_regs *regs)
 359{
 360        __show_regs(regs);
 361        dump_stack();
 362}
 363
 364void show_registers(struct pt_regs *regs)
 365{
 366        const int field = 2 * sizeof(unsigned long);
 367
 368        __show_regs(regs);
 369        print_modules();
 370        printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
 371               current->comm, current->pid, current_thread_info(), current,
 372              field, current_thread_info()->tp_value);
 373        if (cpu_has_userlocal) {
 374                unsigned long tls;
 375
 376                tls = read_c0_userlocal();
 377                if (tls != current_thread_info()->tp_value)
 378                        printk("*HwTLS: %0*lx\n", field, tls);
 379        }
 380
 381        show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
 382        show_code((void *)regs->cp0_epc, user_mode(regs));
 383        printk("\n");
 384}
 385
 386static DEFINE_RAW_SPINLOCK(die_lock);
 387
 388void __noreturn die(const char *str, struct pt_regs *regs)
 389{
 390        static int die_counter;
 391        int sig = SIGSEGV;
 392
 393        oops_enter();
 394
 395        if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
 396                       SIGSEGV) == NOTIFY_STOP)
 397                sig = 0;
 398
 399        console_verbose();
 400        raw_spin_lock_irq(&die_lock);
 401        bust_spinlocks(1);
 402
 403        printk("%s[#%d]:\n", str, ++die_counter);
 404        show_registers(regs);
 405        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 406        raw_spin_unlock_irq(&die_lock);
 407
 408        oops_exit();
 409
 410        if (in_interrupt())
 411                panic("Fatal exception in interrupt");
 412
 413        if (panic_on_oops)
 414                panic("Fatal exception");
 415
 416        if (regs && kexec_should_crash(current))
 417                crash_kexec(regs);
 418
 419        do_exit(sig);
 420}
 421
 422extern struct exception_table_entry __start___dbe_table[];
 423extern struct exception_table_entry __stop___dbe_table[];
 424
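    /*
     * Declare an allocatable __dbe_table section up front so that the
     * __start___dbe_table/__stop___dbe_table symbols below resolve even
     * when the kernel image contains no bus-error fixup entries of its own.
     */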
 425__asm__(
 426"       .section        __dbe_table, \"a\"\n"
 427"       .previous                       \n");
 428
 429/* Given an address, look for it in the exception tables. */
 430static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
 431{
 432        const struct exception_table_entry *e;
 433
 434        e = search_extable(__start___dbe_table,
 435                           __stop___dbe_table - __start___dbe_table, addr);
 436        if (!e)
 437                e = search_module_dbetables(addr);
 438        return e;
 439}
 440
 441asmlinkage void do_be(struct pt_regs *regs)
 442{
 443        const int field = 2 * sizeof(unsigned long);
 444        const struct exception_table_entry *fixup = NULL;
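            /*
             * Bit 2 of Cause is the low bit of ExcCode: set for a data bus
             * error (DBE, ExcCode 7), clear for an instruction bus error
             * (IBE, ExcCode 6).
             */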
 445        int data = regs->cp0_cause & 4;
 446        int action = MIPS_BE_FATAL;
 447        enum ctx_state prev_state;
 448
 449        prev_state = exception_enter();
 450        /* XXX For now.  Fixme, this searches the wrong table ...  */
 451        if (data && !user_mode(regs))
 452                fixup = search_dbe_tables(exception_epc(regs));
 453
 454        if (fixup)
 455                action = MIPS_BE_FIXUP;
 456
 457        if (board_be_handler)
 458                action = board_be_handler(regs, fixup != NULL);
 459        else
 460                mips_cm_error_report();
 461
 462        switch (action) {
 463        case MIPS_BE_DISCARD:
 464                goto out;
 465        case MIPS_BE_FIXUP:
 466                if (fixup) {
 467                        regs->cp0_epc = fixup->nextinsn;
 468                        goto out;
 469                }
 470                break;
 471        default:
 472                break;
 473        }
 474
 475        /*
 476         * Assume it would be too dangerous to continue ...
 477         */
 478        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
 479               data ? "Data" : "Instruction",
 480               field, regs->cp0_epc, field, regs->regs[31]);
 481        if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
 482                       SIGBUS) == NOTIFY_STOP)
 483                goto out;
 484
 485        die_if_kernel("Oops", regs);
 486        force_sig(SIGBUS);
 487
 488out:
 489        exception_exit(prev_state);
 490}
 491
 492/*
 493 * ll/sc, rdhwr, sync emulation
 494 */
 495
 496#define OPCODE 0xfc000000
 497#define BASE   0x03e00000
 498#define RT     0x001f0000
 499#define OFFSET 0x0000ffff
 500#define LL     0xc0000000
 501#define SC     0xe0000000
 502#define SPEC0  0x00000000
 503#define SPEC3  0x7c000000
 504#define RD     0x0000f800
 505#define FUNC   0x0000003f
 506#define SYNC   0x0000000f
 507#define RDHWR  0x0000003b
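    /*
     * Field masks for classic 32-bit MIPS encodings: OPCODE is bits 31:26,
     * BASE/RS bits 25:21, RT bits 20:16, RD bits 15:11, OFFSET the 16-bit
     * immediate and FUNC bits 5:0.  LL and SC are major opcodes; SYNC and
     * RDHWR are SPECIAL (SPEC0) and SPECIAL3 (SPEC3) function codes.
     */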
 508
 509/*  microMIPS definitions   */
 510#define MM_POOL32A_FUNC 0xfc00ffff
 511#define MM_RDHWR        0x00006b3c
 512#define MM_RS           0x001f0000
 513#define MM_RT           0x03e00000
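    /*
     * microMIPS rdhwr is a POOL32A-format instruction: MM_POOL32A_FUNC masks
     * the major opcode and function fields, while MM_RS/MM_RT extract the
     * register operands (see simulate_rdhwr_mm() below).
     */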
 514
 515/*
 516 * The ll_bit is cleared by r*_switch.S
 517 */
 518
 519unsigned int ll_bit;
 520struct task_struct *ll_task;
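    /*
     * Software LL/SC state for the emulation below: simulate_ll() sets
     * ll_bit and records the owning task, and simulate_sc() only performs
     * the store and reports success if ll_bit is still set for the current
     * task.  Because the context-switch code clears ll_bit, an intervening
     * switch makes the emulated sc fail, preserving the usual retry
     * semantics of a sequence such as:
     *
     *     1:  ll    t0, 0(a0)
     *         addu  t0, t0, a1
     *         sc    t0, 0(a0)
     *         beqz  t0, 1b
     */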
 521
 522static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
 523{
 524        unsigned long value, __user *vaddr;
 525        long offset;
 526
 527        /*
 528         * Analyse the ll instruction that just caused an RI exception
 529         * and compute the address it references.
 530         */
 531
 532        /* sign extend offset */
 533        offset = opcode & OFFSET;
 534        offset <<= 16;
 535        offset >>= 16;
 536
 537        vaddr = (unsigned long __user *)
 538                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 539
 540        if ((unsigned long)vaddr & 3)
 541                return SIGBUS;
 542        if (get_user(value, vaddr))
 543                return SIGSEGV;
 544
 545        preempt_disable();
 546
 547        if (ll_task == NULL || ll_task == current) {
 548                ll_bit = 1;
 549        } else {
 550                ll_bit = 0;
 551        }
 552        ll_task = current;
 553
 554        preempt_enable();
 555
 556        regs->regs[(opcode & RT) >> 16] = value;
 557
 558        return 0;
 559}
 560
 561static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
 562{
 563        unsigned long __user *vaddr;
 564        unsigned long reg;
 565        long offset;
 566
 567        /*
 568         * Analyse the sc instruction that just caused an RI exception
 569         * and compute the address it references.
 570         */
 571
 572        /* sign extend offset */
 573        offset = opcode & OFFSET;
 574        offset <<= 16;
 575        offset >>= 16;
 576
 577        vaddr = (unsigned long __user *)
 578                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
 579        reg = (opcode & RT) >> 16;
 580
 581        if ((unsigned long)vaddr & 3)
 582                return SIGBUS;
 583
 584        preempt_disable();
 585
 586        if (ll_bit == 0 || ll_task != current) {
 587                regs->regs[reg] = 0;
 588                preempt_enable();
 589                return 0;
 590        }
 591
 592        preempt_enable();
 593
 594        if (put_user(regs->regs[reg], vaddr))
 595                return SIGSEGV;
 596
 597        regs->regs[reg] = 1;
 598
 599        return 0;
 600}
 601
 602/*
 603 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 604 * opcodes are supposed to result in coprocessor unusable exceptions if
 605 * executed on ll/sc-less processors.  That's the theory.  In practice a
 606 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 607 * instead, so we're doing the emulation thing in both exception handlers.
 608 */
 609static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 610{
 611        if ((opcode & OPCODE) == LL) {
 612                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 613                                1, regs, 0);
 614                return simulate_ll(regs, opcode);
 615        }
 616        if ((opcode & OPCODE) == SC) {
 617                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 618                                1, regs, 0);
 619                return simulate_sc(regs, opcode);
 620        }
 621
 622        return -1;                      /* Must be something else ... */
 623}
 624
 625/*
 626 * Simulate trapping 'rdhwr' instructions to provide user accessible
 627 * registers not implemented in hardware.
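     * The encoding is "rdhwr rt, $rd": rd selects the hardware register and
     * rt the destination GPR, matching the field extraction in the callers
     * below.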
 628 */
 629static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
 630{
 631        struct thread_info *ti = task_thread_info(current);
 632
 633        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 634                        1, regs, 0);
 635        switch (rd) {
 636        case MIPS_HWR_CPUNUM:           /* CPU number */
 637                regs->regs[rt] = smp_processor_id();
 638                return 0;
 639        case MIPS_HWR_SYNCISTEP:        /* SYNCI length */
 640                regs->regs[rt] = min(current_cpu_data.dcache.linesz,
 641                                     current_cpu_data.icache.linesz);
 642                return 0;
 643        case MIPS_HWR_CC:               /* Read count register */
 644                regs->regs[rt] = read_c0_count();
 645                return 0;
 646        case MIPS_HWR_CCRES:            /* Count register resolution */
 647                switch (current_cpu_type()) {
 648                case CPU_20KC:
 649                case CPU_25KF:
 650                        regs->regs[rt] = 1;
 651                        break;
 652                default:
 653                        regs->regs[rt] = 2;
 654                }
 655                return 0;
 656        case MIPS_HWR_ULR:              /* Read UserLocal register */
 657                regs->regs[rt] = ti->tp_value;
 658                return 0;
 659        default:
 660                return -1;
 661        }
 662}
 663
 664static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
 665{
 666        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 667                int rd = (opcode & RD) >> 11;
 668                int rt = (opcode & RT) >> 16;
 669
 670                simulate_rdhwr(regs, rd, rt);
 671                return 0;
 672        }
 673
 674        /* Not ours.  */
 675        return -1;
 676}
 677
 678static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
 679{
 680        if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
 681                int rd = (opcode & MM_RS) >> 16;
 682                int rt = (opcode & MM_RT) >> 21;
 683                simulate_rdhwr(regs, rd, rt);
 684                return 0;
 685        }
 686
 687        /* Not ours.  */
 688        return -1;
 689}
 690
 691static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 692{
 693        if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
 694                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
 695                                1, regs, 0);
 696                return 0;
 697        }
 698
 699        return -1;                      /* Must be something else ... */
 700}
 701
 702/*
 703 * Loongson-3 CSR instruction emulation
 704 */
 705
 706#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
 707
 708#define LWC2             0xc8000000
 709#define RS               BASE
 710#define CSR_OPCODE2      0x00000118
 711#define CSR_OPCODE2_MASK 0x000007ff
 712#define CSR_FUNC_MASK    RT
 713#define CSR_FUNC_CPUCFG  0x8
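    /*
     * These match Loongson's LWC2-encoded CSR instruction with the CPUCFG
     * function: rs supplies the CPUCFG select and rd receives the
     * synthesized value (see simulate_loongson3_cpucfg() below).
     */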
 714
 715static int simulate_loongson3_cpucfg(struct pt_regs *regs,
 716                                     unsigned int opcode)
 717{
 718        int op = opcode & OPCODE;
 719        int op2 = opcode & CSR_OPCODE2_MASK;
 720        int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
 721
 722        if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
 723                int rd = (opcode & RD) >> 11;
 724                int rs = (opcode & RS) >> 21;
 725                __u64 sel = regs->regs[rs];
 726
 727                perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
 728
 729                /* Do not emulate on unsupported core models. */
 730                preempt_disable();
 731                if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
 732                        preempt_enable();
 733                        return -1;
 734                }
 735                regs->regs[rd] = loongson3_cpucfg_read_synthesized(
 736                        &current_cpu_data, sel);
 737                preempt_enable();
 738                return 0;
 739        }
 740
 741        /* Not ours.  */
 742        return -1;
 743}
 744#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
 745
 746asmlinkage void do_ov(struct pt_regs *regs)
 747{
 748        enum ctx_state prev_state;
 749
 750        prev_state = exception_enter();
 751        die_if_kernel("Integer overflow", regs);
 752
 753        force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
 754        exception_exit(prev_state);
 755}
 756
 757#ifdef CONFIG_MIPS_FP_SUPPORT
 758
 759/*
 760 * Send SIGFPE according to FCSR Cause bits, which must have already
 761 * been masked against Enable bits.  This is important as Inexact can
 762 * happen together with Overflow or Underflow, and `ptrace' can set
 763 * any bits.
 764 */
 765void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
 766                     struct task_struct *tsk)
 767{
 768        int si_code = FPE_FLTUNK;
 769
 770        if (fcr31 & FPU_CSR_INV_X)
 771                si_code = FPE_FLTINV;
 772        else if (fcr31 & FPU_CSR_DIV_X)
 773                si_code = FPE_FLTDIV;
 774        else if (fcr31 & FPU_CSR_OVF_X)
 775                si_code = FPE_FLTOVF;
 776        else if (fcr31 & FPU_CSR_UDF_X)
 777                si_code = FPE_FLTUND;
 778        else if (fcr31 & FPU_CSR_INE_X)
 779                si_code = FPE_FLTRES;
 780
 781        force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
 782}
 783
 784int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
 785{
 786        int si_code;
 787
 788        switch (sig) {
 789        case 0:
 790                return 0;
 791
 792        case SIGFPE:
 793                force_fcr31_sig(fcr31, fault_addr, current);
 794                return 1;
 795
 796        case SIGBUS:
 797                force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
 798                return 1;
 799
 800        case SIGSEGV:
 801                mmap_read_lock(current->mm);
 802                if (vma_lookup(current->mm, (unsigned long)fault_addr))
 803                        si_code = SEGV_ACCERR;
 804                else
 805                        si_code = SEGV_MAPERR;
 806                mmap_read_unlock(current->mm);
 807                force_sig_fault(SIGSEGV, si_code, fault_addr);
 808                return 1;
 809
 810        default:
 811                force_sig(sig);
 812                return 1;
 813        }
 814}
 815
 816static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 817                       unsigned long old_epc, unsigned long old_ra)
 818{
 819        union mips_instruction inst = { .word = opcode };
 820        void __user *fault_addr;
 821        unsigned long fcr31;
 822        int sig;
 823
 824        /* If it's obviously not an FP instruction, skip it */
 825        switch (inst.i_format.opcode) {
 826        case cop1_op:
 827        case cop1x_op:
 828        case lwc1_op:
 829        case ldc1_op:
 830        case swc1_op:
 831        case sdc1_op:
 832                break;
 833
 834        default:
 835                return -1;
 836        }
 837
 838        /*
 839         * do_ri skipped over the instruction via compute_return_epc, undo
 840         * that for the FPU emulator.
 841         */
 842        regs->cp0_epc = old_epc;
 843        regs->regs[31] = old_ra;
 844
 845        /* Run the emulator */
 846        sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 847                                       &fault_addr);
 848
 849        /*
 850         * We can't allow the emulated instruction to leave any
 851         * enabled Cause bits set in $fcr31.
 852         */
 853        fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 854        current->thread.fpu.fcr31 &= ~fcr31;
 855
 856        /* Restore the hardware register state */
 857        own_fpu(1);
 858
 859        /* Send a signal if required.  */
 860        process_fpemu_return(sig, fault_addr, fcr31);
 861
 862        return 0;
 863}
 864
 865/*
 866 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 867 */
 868asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
 869{
 870        enum ctx_state prev_state;
 871        void __user *fault_addr;
 872        int sig;
 873
 874        prev_state = exception_enter();
 875        if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
 876                       SIGFPE) == NOTIFY_STOP)
 877                goto out;
 878
 879        /* Clear FCSR.Cause before enabling interrupts */
 880        write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
 881        local_irq_enable();
 882
 883        die_if_kernel("FP exception in kernel code", regs);
 884
 885        if (fcr31 & FPU_CSR_UNI_X) {
 886                /*
 887                 * Unimplemented operation exception.  If we've got the full
 888                 * software emulator on-board, let's use it...
 889                 *
 890                 * Force FPU to dump state into task/thread context.  We're
 891                 * moving a lot of data here for what is probably a single
 892                 * instruction, but the alternative is to pre-decode the FP
 893                 * register operands before invoking the emulator, which seems
 894                 * a bit extreme for what should be an infrequent event.
 895                 */
 896
 897                /* Run the emulator */
 898                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
 899                                               &fault_addr);
 900
 901                /*
 902                 * We can't allow the emulated instruction to leave any
 903                 * enabled Cause bits set in $fcr31.
 904                 */
 905                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
 906                current->thread.fpu.fcr31 &= ~fcr31;
 907
 908                /* Restore the hardware register state */
 909                own_fpu(1);     /* Using the FPU again.  */
 910        } else {
 911                sig = SIGFPE;
 912                fault_addr = (void __user *) regs->cp0_epc;
 913        }
 914
 915        /* Send a signal if required.  */
 916        process_fpemu_return(sig, fault_addr, fcr31);
 917
 918out:
 919        exception_exit(prev_state);
 920}
 921
 922/*
 923 * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
 924 * emulated more than some threshold number of instructions, force migration to
 925 * a "CPU" that has FP support.
 926 */
 927static void mt_ase_fp_affinity(void)
 928{
 929#ifdef CONFIG_MIPS_MT_FPAFF
 930        if (mt_fpemul_threshold > 0 &&
 931             ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
 932                /*
 933                 * If there's no FPU present, or if the application has already
 934                 * restricted the allowed set to exclude any CPUs with FPUs,
 935                 * we'll skip the procedure.
 936                 */
 937                if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
 938                        cpumask_t tmask;
 939
 940                        current->thread.user_cpus_allowed
 941                                = current->cpus_mask;
 942                        cpumask_and(&tmask, &current->cpus_mask,
 943                                    &mt_fpu_cpumask);
 944                        set_cpus_allowed_ptr(current, &tmask);
 945                        set_thread_flag(TIF_FPUBOUND);
 946                }
 947        }
 948#endif /* CONFIG_MIPS_MT_FPAFF */
 949}
 950
 951#else /* !CONFIG_MIPS_FP_SUPPORT */
 952
 953static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
 954                       unsigned long old_epc, unsigned long old_ra)
 955{
 956        return -1;
 957}
 958
 959#endif /* !CONFIG_MIPS_FP_SUPPORT */
 960
 961void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
 962        const char *str)
 963{
 964        char b[40];
 965
 966#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
 967        if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 968                         SIGTRAP) == NOTIFY_STOP)
 969                return;
 970#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
 971
 972        if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
 973                       SIGTRAP) == NOTIFY_STOP)
 974                return;
 975
 976        /*
 977         * A short test says that IRIX 5.3 sends SIGTRAP for all trap
 978         * insns, even for trap and break codes that indicate arithmetic
 979         * failures.  Weird ...
 980         * But should we continue the brokenness???  --macro
 981         */
 982        switch (code) {
 983        case BRK_OVERFLOW:
 984        case BRK_DIVZERO:
 985                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
 986                die_if_kernel(b, regs);
 987                force_sig_fault(SIGFPE,
 988                                code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
 989                                (void __user *) regs->cp0_epc);
 990                break;
 991        case BRK_BUG:
 992                die_if_kernel("Kernel bug detected", regs);
 993                force_sig(SIGTRAP);
 994                break;
 995        case BRK_MEMU:
 996                /*
 997                 * This breakpoint code is used by the FPU emulator to retake
 998                 * control of the CPU after executing the instruction from the
 999                 * delay slot of an emulated branch.
1000                 *
1001                 * Terminate if the exception was recognized as a delay slot
1002                 * return; otherwise handle it as normal.
1003                 */
1004                if (do_dsemulret(regs))
1005                        return;
1006
1007                die_if_kernel("Math emu break/trap", regs);
1008                force_sig(SIGTRAP);
1009                break;
1010        default:
1011                scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
1012                die_if_kernel(b, regs);
1013                if (si_code) {
1014                        force_sig_fault(SIGTRAP, si_code, NULL);
1015                } else {
1016                        force_sig(SIGTRAP);
1017                }
1018        }
1019}
1020
1021asmlinkage void do_bp(struct pt_regs *regs)
1022{
1023        unsigned long epc = msk_isa16_mode(exception_epc(regs));
1024        unsigned int opcode, bcode;
1025        enum ctx_state prev_state;
1026        bool user = user_mode(regs);
1027
1028        prev_state = exception_enter();
1029        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1030        if (get_isa16_mode(regs->cp0_epc)) {
1031                u16 instr[2];
1032
1033                if (__get_inst16(&instr[0], (u16 *)epc, user))
1034                        goto out_sigsegv;
1035
1036                if (!cpu_has_mmips) {
1037                        /* MIPS16e mode */
1038                        bcode = (instr[0] >> 5) & 0x3f;
1039                } else if (mm_insn_16bit(instr[0])) {
1040                        /* 16-bit microMIPS BREAK */
1041                        bcode = instr[0] & 0xf;
1042                } else {
1043                        /* 32-bit microMIPS BREAK */
1044                        if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
1045                                goto out_sigsegv;
1046                        opcode = (instr[0] << 16) | instr[1];
1047                        bcode = (opcode >> 6) & ((1 << 20) - 1);
1048                }
1049        } else {
1050                if (__get_inst32(&opcode, (u32 *)epc, user))
1051                        goto out_sigsegv;
1052                bcode = (opcode >> 6) & ((1 << 20) - 1);
1053        }
1054
1055        /*
1056         * There is an ancient bug in MIPS assemblers: the break code is
1057         * encoded starting at bit 16 instead of bit 6 of the opcode.
1058         * Gas is bug-compatible, but not always, grrr...  We handle both
1059         * cases with a simple heuristic.  --macro
1060         */
1061        if (bcode >= (1 << 10))
1062                bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
1063
1064        /*
1065         * Notify the kprobe handlers if the instruction is likely to
1066         * pertain to them.
1067         */
1068        switch (bcode) {
1069        case BRK_UPROBE:
1070                if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
1071                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1072                        goto out;
1073                else
1074                        break;
1075        case BRK_UPROBE_XOL:
1076                if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
1077                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1078                        goto out;
1079                else
1080                        break;
1081        case BRK_KPROBE_BP:
1082                if (notify_die(DIE_BREAK, "debug", regs, bcode,
1083                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1084                        goto out;
1085                else
1086                        break;
1087        case BRK_KPROBE_SSTEPBP:
1088                if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
1089                               current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
1090                        goto out;
1091                else
1092                        break;
1093        default:
1094                break;
1095        }
1096
1097        do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
1098
1099out:
1100        exception_exit(prev_state);
1101        return;
1102
1103out_sigsegv:
1104        force_sig(SIGSEGV);
1105        goto out;
1106}
1107
1108asmlinkage void do_tr(struct pt_regs *regs)
1109{
1110        u32 opcode, tcode = 0;
1111        enum ctx_state prev_state;
1112        u16 instr[2];
1113        bool user = user_mode(regs);
1114        unsigned long epc = msk_isa16_mode(exception_epc(regs));
1115
1116        prev_state = exception_enter();
1117        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1118        if (get_isa16_mode(regs->cp0_epc)) {
1119                if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
1120                    __get_inst16(&instr[1], (u16 *)(epc + 2), user))
1121                        goto out_sigsegv;
1122                opcode = (instr[0] << 16) | instr[1];
1123                /* Immediate versions don't provide a code.  */
1124                if (!(opcode & OPCODE))
1125                        tcode = (opcode >> 12) & ((1 << 4) - 1);
1126        } else {
1127                if (__get_inst32(&opcode, (u32 *)epc, user))
1128                        goto out_sigsegv;
1129                /* Immediate versions don't provide a code.  */
1130                if (!(opcode & OPCODE))
1131                        tcode = (opcode >> 6) & ((1 << 10) - 1);
1132        }
1133
1134        do_trap_or_bp(regs, tcode, 0, "Trap");
1135
1136out:
1137        exception_exit(prev_state);
1138        return;
1139
1140out_sigsegv:
1141        force_sig(SIGSEGV);
1142        goto out;
1143}
1144
1145asmlinkage void do_ri(struct pt_regs *regs)
1146{
1147        unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
1148        unsigned long old_epc = regs->cp0_epc;
1149        unsigned long old31 = regs->regs[31];
1150        enum ctx_state prev_state;
1151        unsigned int opcode = 0;
1152        int status = -1;
1153
1154        /*
1155         * Avoid any kernel code. Just emulate the R2 instruction
1156         * as quickly as possible.
1157         */
1158        if (mipsr2_emulation && cpu_has_mips_r6 &&
1159            likely(user_mode(regs)) &&
1160            likely(get_user(opcode, epc) >= 0)) {
1161                unsigned long fcr31 = 0;
1162
1163                status = mipsr2_decoder(regs, opcode, &fcr31);
1164                switch (status) {
1165                case 0:
1166                case SIGEMT:
1167                        return;
1168                case SIGILL:
1169                        goto no_r2_instr;
1170                default:
1171                        process_fpemu_return(status,
1172                                             &current->thread.cp0_baduaddr,
1173                                             fcr31);
1174                        return;
1175                }
1176        }
1177
1178no_r2_instr:
1179
1180        prev_state = exception_enter();
1181        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1182
1183        if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
1184                       SIGILL) == NOTIFY_STOP)
1185                goto out;
1186
1187        die_if_kernel("Reserved instruction in kernel code", regs);
1188
1189        if (unlikely(compute_return_epc(regs) < 0))
1190                goto out;
1191
1192        if (!get_isa16_mode(regs->cp0_epc)) {
1193                if (unlikely(get_user(opcode, epc) < 0))
1194                        status = SIGSEGV;
1195
1196                if (!cpu_has_llsc && status < 0)
1197                        status = simulate_llsc(regs, opcode);
1198
1199                if (status < 0)
1200                        status = simulate_rdhwr_normal(regs, opcode);
1201
1202                if (status < 0)
1203                        status = simulate_sync(regs, opcode);
1204
1205                if (status < 0)
1206                        status = simulate_fp(regs, opcode, old_epc, old31);
1207
1208#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
1209                if (status < 0)
1210                        status = simulate_loongson3_cpucfg(regs, opcode);
1211#endif
1212        } else if (cpu_has_mmips) {
1213                unsigned short mmop[2] = { 0 };
1214
1215                if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
1216                        status = SIGSEGV;
1217                if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
1218                        status = SIGSEGV;
1219                opcode = mmop[0];
1220                opcode = (opcode << 16) | mmop[1];
1221
1222                if (status < 0)
1223                        status = simulate_rdhwr_mm(regs, opcode);
1224        }
1225
1226        if (status < 0)
1227                status = SIGILL;
1228
1229        if (unlikely(status > 0)) {
1230                regs->cp0_epc = old_epc;                /* Undo skip-over.  */
1231                regs->regs[31] = old31;
1232                force_sig(status);
1233        }
1234
1235out:
1236        exception_exit(prev_state);
1237}
1238
1239/*
1240 * No lock; only written during early bootup by CPU 0.
1241 */
1242static RAW_NOTIFIER_HEAD(cu2_chain);
1243
1244int __ref register_cu2_notifier(struct notifier_block *nb)
1245{
1246        return raw_notifier_chain_register(&cu2_chain, nb);
1247}
1248
1249int cu2_notifier_call_chain(unsigned long val, void *v)
1250{
1251        return raw_notifier_call_chain(&cu2_chain, val, v);
1252}
1253
1254static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
1255        void *data)
1256{
1257        struct pt_regs *regs = data;
1258
1259        die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
1260                              "instruction", regs);
1261        force_sig(SIGILL);
1262
1263        return NOTIFY_OK;
1264}
1265
1266#ifdef CONFIG_MIPS_FP_SUPPORT
1267
1268static int enable_restore_fp_context(int msa)
1269{
1270        int err, was_fpu_owner, prior_msa;
1271        bool first_fp;
1272
1273        /* Initialize context if it hasn't been used already */
1274        first_fp = init_fp_ctx(current);
1275
1276        if (first_fp) {
1277                preempt_disable();
1278                err = own_fpu_inatomic(1);
1279                if (msa && !err) {
1280                        enable_msa();
1281                        /*
1282                         * With MSA enabled, userspace can see MSACSR and
1283                         * the MSA registers, but their values belong to
1284                         * whichever task used them last; restore them from
1285                         * the saved fp/msa context.
1286                         */
1287                        write_msa_csr(current->thread.fpu.msacsr);
1288                        /*
1289                         * own_fpu_inatomic(1) only restores the low 64 bits;
1290                         * fix up the high 64 bits.
1291                         */
1292                        init_msa_upper();
1293                        set_thread_flag(TIF_USEDMSA);
1294                        set_thread_flag(TIF_MSA_CTX_LIVE);
1295                }
1296                preempt_enable();
1297                return err;
1298        }
1299
1300        /*
1301         * This task has formerly used the FP context.
1302         *
1303         * If this thread has no live MSA vector context then we can simply
1304         * restore the scalar FP context. If it has live MSA vector context
1305         * (that is, it has or may have used MSA since last performing a
1306         * function call) then we'll need to restore the vector context. This
1307         * applies even if we're currently only executing a scalar FP
1308         * instruction. This is because if we were to later execute an MSA
1309         * instruction then we'd either have to:
1310         *
1311         *  - Restore the vector context & clobber any registers modified by
1312         *    scalar FP instructions between now & then.
1313         *
1314         * or
1315         *
1316         *  - Not restore the vector context & lose the most significant bits
1317         *    of all vector registers.
1318         *
1319         * Neither of those options is acceptable. We cannot restore the least
1320         * significant bits of the registers now & only restore the most
1321         * significant bits later because the most significant bits of any
1322         * vector registers whose aliased FP register is modified now will have
1323         * been zeroed. We'd have no way to know that when restoring the vector
1324         * context & thus may load an outdated value for the most significant
1325         * bits of a vector register.
1326         */
1327        if (!msa && !thread_msa_context_live())
1328                return own_fpu(1);
1329
1330        /*
1331         * This task is using or has previously used MSA. Thus we require
1332         * that Status.FR == 1.
1333         */
1334        preempt_disable();
1335        was_fpu_owner = is_fpu_owner();
1336        err = own_fpu_inatomic(0);
1337        if (err)
1338                goto out;
1339
1340        enable_msa();
1341        write_msa_csr(current->thread.fpu.msacsr);
1342        set_thread_flag(TIF_USEDMSA);
1343
1344        /*
1345         * If this is the first time that the task is using MSA and it has
1346         * previously used scalar FP in this time slice then we already have
1347         * FP context which we shouldn't clobber. We do however need to clear
1348         * the upper 64b of each vector register so that this task has no
1349         * opportunity to see data left behind by another.
1350         */
1351        prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
1352        if (!prior_msa && was_fpu_owner) {
1353                init_msa_upper();
1354
1355                goto out;
1356        }
1357
1358        if (!prior_msa) {
1359                /*
1360                 * Restore the least significant 64b of each vector register
1361                 * from the existing scalar FP context.
1362                 */
1363                _restore_fp(current);
1364
1365                /*
1366                 * The task has not formerly used MSA, so clear the upper 64b
1367                 * of each vector register such that it cannot see data left
1368                 * behind by another task.
1369                 */
1370                init_msa_upper();
1371        } else {
1372                /* We need to restore the vector context. */
1373                restore_msa(current);
1374
1375                /* Restore the scalar FP control & status register */
1376                if (!was_fpu_owner)
1377                        write_32bit_cp1_register(CP1_STATUS,
1378                                                 current->thread.fpu.fcr31);
1379        }
1380
1381out:
1382        preempt_enable();
1383
1384        return 0;
1385}
1386
1387#else /* !CONFIG_MIPS_FP_SUPPORT */
1388
1389static int enable_restore_fp_context(int msa)
1390{
1391        return SIGILL;
1392}
1393
1394#endif /* CONFIG_MIPS_FP_SUPPORT */
1395
1396asmlinkage void do_cpu(struct pt_regs *regs)
1397{
1398        enum ctx_state prev_state;
1399        unsigned int __user *epc;
1400        unsigned long old_epc, old31;
1401        unsigned int opcode;
1402        unsigned int cpid;
1403        int status;
1404
1405        prev_state = exception_enter();
1406        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
1407
1408        if (cpid != 2)
1409                die_if_kernel("do_cpu invoked from kernel context!", regs);
1410
1411        switch (cpid) {
1412        case 0:
1413                epc = (unsigned int __user *)exception_epc(regs);
1414                old_epc = regs->cp0_epc;
1415                old31 = regs->regs[31];
1416                opcode = 0;
1417                status = -1;
1418
1419                if (unlikely(compute_return_epc(regs) < 0))
1420                        break;
1421
1422                if (!get_isa16_mode(regs->cp0_epc)) {
1423                        if (unlikely(get_user(opcode, epc) < 0))
1424                                status = SIGSEGV;
1425
1426                        if (!cpu_has_llsc && status < 0)
1427                                status = simulate_llsc(regs, opcode);
1428                }
1429
1430                if (status < 0)
1431                        status = SIGILL;
1432
1433                if (unlikely(status > 0)) {
1434                        regs->cp0_epc = old_epc;        /* Undo skip-over.  */
1435                        regs->regs[31] = old31;
1436                        force_sig(status);
1437                }
1438
1439                break;
1440
1441#ifdef CONFIG_MIPS_FP_SUPPORT
1442        case 3:
1443                /*
1444                 * The COP3 opcode space and consequently the CP0.Status.CU3
1445                 * bit and the CP0.Cause.CE=3 encoding have been removed as
1446                 * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
1447                 * up the space has been reused for COP1X instructions, that
1448                 * are enabled by the CP0.Status.CU1 bit and consequently
1449                 * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
1450                 * exceptions.  Some FPU-less processors that implement one
1451                 * of these ISAs however use this code erroneously for COP1X
1452                 * instructions.  Therefore we redirect this trap to the FP
1453                 * emulator too.
1454                 */
1455                if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
1456                        force_sig(SIGILL);
1457                        break;
1458                }
1459                fallthrough;
1460        case 1: {
1461                void __user *fault_addr;
1462                unsigned long fcr31;
1463                int err, sig;
1464
1465                err = enable_restore_fp_context(0);
1466
1467                if (raw_cpu_has_fpu && !err)
1468                        break;
1469
1470                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1471                                               &fault_addr);
1472
1473                /*
1474                 * We can't allow the emulated instruction to leave
1475                 * any enabled Cause bits set in $fcr31.
1476                 */
1477                fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
1478                current->thread.fpu.fcr31 &= ~fcr31;
1479
1480                /* Send a signal if required.  */
1481                if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
1482                        mt_ase_fp_affinity();
1483
1484                break;
1485        }
1486#else /* CONFIG_MIPS_FP_SUPPORT */
1487        case 1:
1488        case 3:
1489                force_sig(SIGILL);
1490                break;
1491#endif /* CONFIG_MIPS_FP_SUPPORT */
1492
1493        case 2:
1494                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
1495                break;
1496        }
1497
1498        exception_exit(prev_state);
1499}
1500
1501asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
1502{
1503        enum ctx_state prev_state;
1504
1505        prev_state = exception_enter();
1506        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
1507        if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
1508                       current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
1509                goto out;
1510
1511        /* Clear MSACSR.Cause before enabling interrupts */
1512        write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
1513        local_irq_enable();
1514
1515        die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
1516        force_sig(SIGFPE);
1517out:
1518        exception_exit(prev_state);
1519}
1520
1521asmlinkage void do_msa(struct pt_regs *regs)
1522{
1523        enum ctx_state prev_state;
1524        int err;
1525
1526        prev_state = exception_enter();
1527
1528        if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
1529                force_sig(SIGILL);
1530                goto out;
1531        }
1532
1533        die_if_kernel("do_msa invoked from kernel context!", regs);
1534
1535        err = enable_restore_fp_context(1);
1536        if (err)
1537                force_sig(SIGILL);
1538out:
1539        exception_exit(prev_state);
1540}
1541
1542asmlinkage void do_mdmx(struct pt_regs *regs)
1543{
1544        enum ctx_state prev_state;
1545
1546        prev_state = exception_enter();
1547        force_sig(SIGILL);
1548        exception_exit(prev_state);
1549}
1550
1551/*
1552 * Called with interrupts disabled.
1553 */
1554asmlinkage void do_watch(struct pt_regs *regs)
1555{
1556        enum ctx_state prev_state;
1557
1558        prev_state = exception_enter();
1559        /*
1560         * Clear the WP bit (bit 22) of the Cause register so we don't
1561         * loop forever.
1562         */
1563        clear_c0_cause(CAUSEF_WP);
1564
1565        /*
1566         * If the current thread has the watch registers loaded, save
1567         * their values and send SIGTRAP.  Otherwise another thread
1568         * left the registers set, clear them and continue.
1569         */
1570        if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
1571                mips_read_watch_registers();
1572                local_irq_enable();
1573                force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
1574        } else {
1575                mips_clear_watch_registers();
1576                local_irq_enable();
1577        }
1578        exception_exit(prev_state);
1579}
1580
1581asmlinkage void do_mcheck(struct pt_regs *regs)
1582{
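            /*
             * Status.TS (TLB shutdown) is set when the machine check was
             * raised by multiple matching TLB entries.
             */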
1583        int multi_match = regs->cp0_status & ST0_TS;
1584        enum ctx_state prev_state;
1585
1586        prev_state = exception_enter();
1587        show_regs(regs);
1588
1589        if (multi_match) {
1590                dump_tlb_regs();
1591                pr_info("\n");
1592                dump_tlb_all();
1593        }
1594
1595        show_code((void *)regs->cp0_epc, user_mode(regs));
1596
1597        /*
1598         * Some chips may have other causes of machine check (e.g. SB1
1599         * graduation timer)
1600         */
1601        panic("Caught Machine Check exception - %scaused by multiple "
1602              "matching entries in the TLB.",
1603              (multi_match) ? "" : "not ");
1604}
1605
1606asmlinkage void do_mt(struct pt_regs *regs)
1607{
1608        int subcode;
1609
1610        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
1611                        >> VPECONTROL_EXCPT_SHIFT;
1612        switch (subcode) {
1613        case 0:
1614                printk(KERN_DEBUG "Thread Underflow\n");
1615                break;
1616        case 1:
1617                printk(KERN_DEBUG "Thread Overflow\n");
1618                break;
1619        case 2:
1620                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
1621                break;
1622        case 3:
1623                printk(KERN_DEBUG "Gating Storage Exception\n");
1624                break;
1625        case 4:
1626                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
1627                break;
1628        case 5:
1629                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
1630                break;
1631        default:
1632                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
1633                        subcode);
1634                break;
1635        }
1636        die_if_kernel("MIPS MT Thread exception in kernel", regs);
1637
1638        force_sig(SIGILL);
1639}
1640
1641
1642asmlinkage void do_dsp(struct pt_regs *regs)
1643{
1644        if (cpu_has_dsp)
1645                panic("Unexpected DSP exception");
1646
1647        force_sig(SIGILL);
1648}
1649
1650asmlinkage void do_reserved(struct pt_regs *regs)
1651{
1652        /*
1653         * Game over - no way to handle this if it ever occurs.  Most probably
1654         * caused by a new unknown CPU type or by another deadly
1655         * hardware/software error.
1656         */
1657        show_regs(regs);
1658        panic("Caught reserved exception %ld - should not happen.",
1659              (regs->cp0_cause & 0x7f) >> 2);
1660}
1661
1662static int __initdata l1parity = 1;
1663static int __init nol1parity(char *s)
1664{
1665        l1parity = 0;
1666        return 1;
1667}
1668__setup("nol1par", nol1parity);
1669static int __initdata l2parity = 1;
1670static int __init nol2parity(char *s)
1671{
1672        l2parity = 0;
1673        return 1;
1674}
1675__setup("nol2par", nol2parity);
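
/*
 * Usage note (illustrative, not taken from this file): either check can be
 * disabled from the kernel command line via the parameters registered
 * above, e.g. booting with
 *
 *	nol1par nol2par
 *
 * On CM3 systems parity_protection_init() below ties the two settings
 * together, so disabling one disables both.
 */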
1676
1677/*
1678 * Some MIPS CPUs can enable/disable cache parity detection, but they
1679 * do it in different ways.
1680 */
1681static inline __init void parity_protection_init(void)
1682{
1683#define ERRCTL_PE       0x80000000
1684#define ERRCTL_L2P      0x00800000
1685
1686        if (mips_cm_revision() >= CM_REV_CM3) {
1687                ulong gcr_ectl, cp0_ectl;
1688
1689                /*
1690                 * With CM3 systems we need to ensure that the L1 & L2
1691                 * parity enables are set to the same value, since this
1692                 * is presumed by the hardware engineers.
1693                 *
1694                 * If the user disabled either of L1 or L2 ECC checking,
1695                 * disable both.
1696                 */
1697                l1parity &= l2parity;
1698                l2parity &= l1parity;
1699
1700                /* Probe L1 ECC support */
1701                cp0_ectl = read_c0_ecc();
1702                write_c0_ecc(cp0_ectl | ERRCTL_PE);
1703                back_to_back_c0_hazard();
1704                cp0_ectl = read_c0_ecc();
1705
1706                /* Probe L2 ECC support */
1707                gcr_ectl = read_gcr_err_control();
1708
1709                if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
1710                    !(cp0_ectl & ERRCTL_PE)) {
1711                        /*
1712                         * One of L1 or L2 ECC checking isn't supported,
1713                         * so we cannot enable either.
1714                         */
1715                        l1parity = l2parity = 0;
1716                }
1717
1718                /* Configure L1 ECC checking */
1719                if (l1parity)
1720                        cp0_ectl |= ERRCTL_PE;
1721                else
1722                        cp0_ectl &= ~ERRCTL_PE;
1723                write_c0_ecc(cp0_ectl);
1724                back_to_back_c0_hazard();
1725                WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
1726
1727                /* Configure L2 ECC checking */
1728                if (l2parity)
1729                        gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1730                else
1731                        gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
1732                write_gcr_err_control(gcr_ectl);
1733                gcr_ectl = read_gcr_err_control();
1734                gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
1735                WARN_ON(!!gcr_ectl != l2parity);
1736
1737                pr_info("Cache parity protection %sabled\n",
1738                        l1parity ? "en" : "dis");
1739                return;
1740        }
1741
1742        switch (current_cpu_type()) {
1743        case CPU_24K:
1744        case CPU_34K:
1745        case CPU_74K:
1746        case CPU_1004K:
1747        case CPU_1074K:
1748        case CPU_INTERAPTIV:
1749        case CPU_PROAPTIV:
1750        case CPU_P5600:
1751        case CPU_QEMU_GENERIC:
1752        case CPU_P6600:
1753                {
1754                        unsigned long errctl;
1755                        unsigned int l1parity_present, l2parity_present;
1756
1757                        errctl = read_c0_ecc();
1758                        errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1759
1760                        /* probe L1 parity support */
1761                        write_c0_ecc(errctl | ERRCTL_PE);
1762                        back_to_back_c0_hazard();
1763                        l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1764
1765                        /* probe L2 parity support */
1766                        write_c0_ecc(errctl|ERRCTL_L2P);
1767                        back_to_back_c0_hazard();
1768                        l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1769
1770                        if (l1parity_present && l2parity_present) {
1771                                if (l1parity)
1772                                        errctl |= ERRCTL_PE;
1773                                if (l1parity ^ l2parity)
1774                                        errctl |= ERRCTL_L2P;
1775                        } else if (l1parity_present) {
1776                                if (l1parity)
1777                                        errctl |= ERRCTL_PE;
1778                        } else if (l2parity_present) {
1779                                if (l2parity)
1780                                        errctl |= ERRCTL_L2P;
1781                        } else {
1782                                /* No parity available */
1783                        }
1784
1785                        printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1786
1787                        write_c0_ecc(errctl);
1788                        back_to_back_c0_hazard();
1789                        errctl = read_c0_ecc();
1790                        printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1791
1792                        if (l1parity_present)
1793                                printk(KERN_INFO "Cache parity protection %sabled\n",
1794                                       (errctl & ERRCTL_PE) ? "en" : "dis");
1795
1796                        if (l2parity_present) {
1797                                if (l1parity_present && l1parity)
1798                                        errctl ^= ERRCTL_L2P;
1799                                printk(KERN_INFO "L2 cache parity protection %sabled\n",
1800                                       (errctl & ERRCTL_L2P) ? "en" : "dis");
1801                        }
1802                }
1803                break;
1804
1805        case CPU_5KC:
1806        case CPU_5KE:
1807        case CPU_LOONGSON32:
1808                /* Set the PE bit (bit 31) in the c0_errctl register. */
1809                write_c0_ecc(0x80000000);
1810                back_to_back_c0_hazard();
1811                printk(KERN_INFO "Cache parity protection %sabled\n",
1812                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
1813                break;
1814        case CPU_20KC:
1815        case CPU_25KF:
1816                /* Clear the DE bit (bit 16) in the c0_status register. */
1817                printk(KERN_INFO "Enable cache parity protection for "
1818                       "MIPS 20KC/25KF CPUs.\n");
1819                clear_c0_status(ST0_DE);
1820                break;
1821        default:
1822                break;
1823        }
1824}
1825
1826asmlinkage void cache_parity_error(void)
1827{
1828        const int field = 2 * sizeof(unsigned long);
1829        unsigned int reg_val;
1830
1831        /* For the moment, report the problem and hang. */
1832        printk("Cache error exception:\n");
1833        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1834        reg_val = read_c0_cacheerr();
1835        printk("c0_cacheerr == %08x\n", reg_val);
1836
1837        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1838               reg_val & (1<<30) ? "secondary" : "primary",
1839               reg_val & (1<<31) ? "data" : "insn");
1840        if ((cpu_has_mips_r2_r6) &&
1841            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
1842                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
1843                        reg_val & (1<<29) ? "ED " : "",
1844                        reg_val & (1<<28) ? "ET " : "",
1845                        reg_val & (1<<27) ? "ES " : "",
1846                        reg_val & (1<<26) ? "EE " : "",
1847                        reg_val & (1<<25) ? "EB " : "",
1848                        reg_val & (1<<24) ? "EI " : "",
1849                        reg_val & (1<<23) ? "E1 " : "",
1850                        reg_val & (1<<22) ? "E0 " : "");
1851        } else {
1852                pr_err("Error bits: %s%s%s%s%s%s%s\n",
1853                        reg_val & (1<<29) ? "ED " : "",
1854                        reg_val & (1<<28) ? "ET " : "",
1855                        reg_val & (1<<26) ? "EE " : "",
1856                        reg_val & (1<<25) ? "EB " : "",
1857                        reg_val & (1<<24) ? "EI " : "",
1858                        reg_val & (1<<23) ? "E1 " : "",
1859                        reg_val & (1<<22) ? "E0 " : "");
1860        }
1861        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1862
1863#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1864        if (reg_val & (1<<22))
1865                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1866
1867        if (reg_val & (1<<23))
1868                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1869#endif
1870
1871        panic("Can't handle the cache error!");
1872}
1873
1874asmlinkage void do_ftlb(void)
1875{
1876        const int field = 2 * sizeof(unsigned long);
1877        unsigned int reg_val;
1878
1879        /* For the moment, report the problem and hang. */
1880        if ((cpu_has_mips_r2_r6) &&
1881            (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
1882            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
1883                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
1884                       read_c0_ecc());
1885                pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
1886                reg_val = read_c0_cacheerr();
1887                pr_err("c0_cacheerr == %08x\n", reg_val);
1888
1889                if ((reg_val & 0xc0000000) == 0xc0000000) {
1890                        pr_err("Decoded c0_cacheerr: FTLB parity error\n");
1891                } else {
1892                        pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
1893                               reg_val & (1<<30) ? "secondary" : "primary",
1894                               reg_val & (1<<31) ? "data" : "insn");
1895                }
1896        } else {
1897                pr_err("FTLB error exception\n");
1898        }
1899        /* Just print the cacheerr bits for now */
1900        cache_parity_error();
1901}
1902
1903asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
1904{
1905        u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
1906                        LOONGSON_DIAG1_EXCCODE_SHIFT;
1907        enum ctx_state prev_state;
1908
1909        prev_state = exception_enter();
1910
1911        switch (exccode) {
1912        case 0x08:
1913                /* Undocumented exception, will trigger on certain
1914                 * also-undocumented instructions accessible from userspace.
1915                 * Processor state is not otherwise corrupted, but currently
1916                 * we don't know how to proceed. Maybe there is some
1917                 * undocumented control flag to enable the instructions?
1918                 */
1919                force_sig(SIGILL);
1920                break;
1921
1922        default:
1923                /* None of the other exceptions, documented or not, have
1924                 * further details given; none are encountered in the wild
1925                 * either. Panic in case some of them turn out to be fatal.
1926                 */
1927                show_regs(regs);
1928                panic("Unhandled Loongson exception - GSCause = %08x", diag1);
1929        }
1930
1931        exception_exit(prev_state);
1932}
1933
1934/*
1935 * SDBBP EJTAG debug exception handler.
1936 * We skip the instruction and return to the next instruction.
1937 */
1938void ejtag_exception_handler(struct pt_regs *regs)
1939{
1940        const int field = 2 * sizeof(unsigned long);
1941        unsigned long depc, old_epc, old_ra;
1942        unsigned int debug;
1943
1944        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1945        depc = read_c0_depc();
1946        debug = read_c0_debug();
1947        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1948        if (debug & 0x80000000) {
1949                /*
1950                 * In branch delay slot.
1951                 * We cheat a little bit here and use EPC to calculate the
1952                 * debug return address (DEPC). EPC is restored after the
1953                 * calculation.
1954                 */
1955                old_epc = regs->cp0_epc;
1956                old_ra = regs->regs[31];
1957                regs->cp0_epc = depc;
1958                compute_return_epc(regs);
1959                depc = regs->cp0_epc;
1960                regs->cp0_epc = old_epc;
1961                regs->regs[31] = old_ra;
1962        } else
1963                depc += 4;
1964        write_c0_depc(depc);
1965
1966#if 0
1967        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1968        write_c0_debug(debug | 0x100);
1969#endif
1970}
1971
1972/*
1973 * NMI exception handler.
1974 * No lock; only written during early bootup by CPU 0.
1975 */
1976static RAW_NOTIFIER_HEAD(nmi_chain);
1977
1978int register_nmi_notifier(struct notifier_block *nb)
1979{
1980        return raw_notifier_chain_register(&nmi_chain, nb);
1981}
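
/*
 * Registration sketch (callback and names are hypothetical, not part of
 * this file): platform code hooks the NMI path like any raw notifier; the
 * regs pointer passed by nmi_exception_handler() arrives as the data
 * argument:
 *
 *	static int my_board_nmi(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		struct pt_regs *regs = data;
 *
 *		pr_emerg("board NMI, EPC=%lx\n", regs->cp0_epc);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_board_nmi_nb = {
 *		.notifier_call	= my_board_nmi,
 *	};
 *
 *	register_nmi_notifier(&my_board_nmi_nb);
 */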
1982
1983void __noreturn nmi_exception_handler(struct pt_regs *regs)
1984{
1985        char str[100];
1986
1987        nmi_enter();
1988        raw_notifier_call_chain(&nmi_chain, 0, regs);
1989        bust_spinlocks(1);
1990        snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
1991                 smp_processor_id(), regs->cp0_epc);
1992        regs->cp0_epc = read_c0_errorepc();
1993        die(str, regs);
1994        nmi_exit();
1995}
1996
1997unsigned long ebase;
1998EXPORT_SYMBOL_GPL(ebase);
1999unsigned long exception_handlers[32];
2000unsigned long vi_handlers[64];
2001
2002void reserve_exception_space(phys_addr_t addr, unsigned long size)
2003{
2004        memblock_reserve(addr, size);
2005}
2006
2007void __init *set_except_vector(int n, void *addr)
2008{
2009        unsigned long handler = (unsigned long) addr;
2010        unsigned long old_handler;
2011
2012#ifdef CONFIG_CPU_MICROMIPS
2013        /*
2014         * Only the TLB handlers are cache aligned with an even
2015         * address. All other handlers are on an odd address and
2016         * require no modification. Otherwise, MIPS32 mode will
2017         * be entered when handling any TLB exceptions. That
2018         * would be bad...since we must stay in microMIPS mode.
2019         */
2020        if (!(handler & 0x1))
2021                handler |= 1;
2022#endif
2023        old_handler = xchg(&exception_handlers[n], handler);
2024
2025        if (n == 0 && cpu_has_divec) {
2026#ifdef CONFIG_CPU_MICROMIPS
2027                unsigned long jump_mask = ~((1 << 27) - 1);
2028#else
2029                unsigned long jump_mask = ~((1 << 28) - 1);
2030#endif
2031                u32 *buf = (u32 *)(ebase + 0x200);
2032                unsigned int k0 = 26;
2033                if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
2034                        uasm_i_j(&buf, handler & ~jump_mask);
2035                        uasm_i_nop(&buf);
2036                } else {
2037                        UASM_i_LA(&buf, k0, handler);
2038                        uasm_i_jr(&buf, k0);
2039                        uasm_i_nop(&buf);
2040                }
2041                local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
2042        }
2043        return (void *)old_handler;
2044}
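
/*
 * Usage sketch (handler name is hypothetical): early platform code can
 * install its own low-level vector and keep the previous one around:
 *
 *	void *old_dbe = set_except_vector(EXCCODE_DBE, my_handle_dbe);
 *	...
 *	set_except_vector(EXCCODE_DBE, old_dbe);
 *
 * trap_init() below uses the same call to install the default handlers.
 */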
2045
2046static void do_default_vi(void)
2047{
2048        show_regs(get_irq_regs());
2049        panic("Caught unexpected vectored interrupt.");
2050}
2051
2052static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
2053{
2054        unsigned long handler;
2055        unsigned long old_handler = vi_handlers[n];
2056        int srssets = current_cpu_data.srsets;
2057        u16 *h;
2058        unsigned char *b;
2059
2060        BUG_ON(!cpu_has_veic && !cpu_has_vint);
2061
2062        if (addr == NULL) {
2063                handler = (unsigned long) do_default_vi;
2064                srs = 0;
2065        } else
2066                handler = (unsigned long) addr;
2067        vi_handlers[n] = handler;
2068
2069        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
2070
2071        if (srs >= srssets)
2072                panic("Shadow register set %d not supported", srs);
2073
2074        if (cpu_has_veic) {
2075                if (board_bind_eic_interrupt)
2076                        board_bind_eic_interrupt(n, srs);
2077        } else if (cpu_has_vint) {
2078                /* SRSMap is only defined if shadow sets are implemented */
2079                if (srssets > 1)
2080                        change_c0_srsmap(0xf << n*4, srs << n*4);
2081        }
2082
2083        if (srs == 0) {
2084                /*
2085                 * If no shadow set is selected then use the default handler
2086                 * that does normal register saving and standard interrupt exit
2087                 */
2088                extern char except_vec_vi, except_vec_vi_lui;
2089                extern char except_vec_vi_ori, except_vec_vi_end;
2090                extern char rollback_except_vec_vi;
2091                char *vec_start = using_rollback_handler() ?
2092                        &rollback_except_vec_vi : &except_vec_vi;
2093#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
2094                const int lui_offset = &except_vec_vi_lui - vec_start + 2;
2095                const int ori_offset = &except_vec_vi_ori - vec_start + 2;
2096#else
2097                const int lui_offset = &except_vec_vi_lui - vec_start;
2098                const int ori_offset = &except_vec_vi_ori - vec_start;
2099#endif
2100                const int handler_len = &except_vec_vi_end - vec_start;
2101
2102                if (handler_len > VECTORSPACING) {
2103                        /*
2104                         * Sigh... panicking won't help as the console
2105                         * is probably not configured :(
2106                         */
2107                        panic("VECTORSPACING too small");
2108                }
2109
2110                set_handler(((unsigned long)b - ebase), vec_start,
2111#ifdef CONFIG_CPU_MICROMIPS
2112                                (handler_len - 1));
2113#else
2114                                handler_len);
2115#endif
2116                h = (u16 *)(b + lui_offset);
2117                *h = (handler >> 16) & 0xffff;
2118                h = (u16 *)(b + ori_offset);
2119                *h = (handler & 0xffff);
2120                local_flush_icache_range((unsigned long)b,
2121                                         (unsigned long)(b+handler_len));
2122        }
2123        else {
2124                /*
2125                 * In other cases jump directly to the interrupt handler. It
2126                 * is the handler's responsibility to save registers if required
2127                 * (e.g. hi/lo) and return from the exception using "eret".
2128                 */
2129                u32 insn;
2130
2131                h = (u16 *)b;
2132                /* j handler */
2133#ifdef CONFIG_CPU_MICROMIPS
2134                insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
2135#else
2136                insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
2137#endif
2138                h[0] = (insn >> 16) & 0xffff;
2139                h[1] = insn & 0xffff;
2140                h[2] = 0;
2141                h[3] = 0;
2142                local_flush_icache_range((unsigned long)b,
2143                                         (unsigned long)(b+8));
2144        }
2145
2146        return (void *)old_handler;
2147}
2148
2149void *set_vi_handler(int n, vi_handler_t addr)
2150{
2151        return set_vi_srs_handler(n, addr, 0);
2152}
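
/*
 * Usage sketch (dispatch routine name is hypothetical): interrupt setup
 * code binds a vectored interrupt to its own dispatcher, or passes NULL to
 * fall back to do_default_vi():
 *
 *	static void my_hw0_dispatch(void);	// must match vi_handler_t
 *
 *	set_vi_handler(2, my_hw0_dispatch);	// CP0 interrupt vector 2
 *	set_vi_handler(2, NULL);		// restore the default handler
 */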
2153
2154extern void tlb_init(void);
2155
2156/*
2157 * Timer interrupt
2158 */
2159int cp0_compare_irq;
2160EXPORT_SYMBOL_GPL(cp0_compare_irq);
2161int cp0_compare_irq_shift;
2162
2163/*
2164 * Performance counter IRQ or -1 if shared with timer
2165 */
2166int cp0_perfcount_irq;
2167EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
2168
2169/*
2170 * Fast debug channel IRQ or -1 if not present
2171 */
2172int cp0_fdc_irq;
2173EXPORT_SYMBOL_GPL(cp0_fdc_irq);
2174
2175static int noulri;
2176
2177static int __init ulri_disable(char *s)
2178{
2179        pr_info("Disabling ulri\n");
2180        noulri = 1;
2181
2182        return 1;
2183}
2184__setup("noulri", ulri_disable);
2185
2186/* configure STATUS register */
2187static void configure_status(void)
2188{
2189        /*
2190         * Disable coprocessors and select 32-bit or 64-bit addressing
2191         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
2192         * flag that some firmware may have left set and the TS bit (for
2193         * IP27).  Set XX for ISA IV code to work.
2194         */
2195        unsigned int status_set = ST0_KERNEL_CUMASK;
2196#ifdef CONFIG_64BIT
2197        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
2198#endif
2199        if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
2200                status_set |= ST0_XX;
2201        if (cpu_has_dsp)
2202                status_set |= ST0_MX;
2203
2204        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
2205                         status_set);
2206        back_to_back_c0_hazard();
2207}
2208
2209unsigned int hwrena;
2210EXPORT_SYMBOL_GPL(hwrena);
2211
2212/* configure HWRENA register */
2213static void configure_hwrena(void)
2214{
2215        hwrena = cpu_hwrena_impl_bits;
2216
2217        if (cpu_has_mips_r2_r6)
2218                hwrena |= MIPS_HWRENA_CPUNUM |
2219                          MIPS_HWRENA_SYNCISTEP |
2220                          MIPS_HWRENA_CC |
2221                          MIPS_HWRENA_CCRES;
2222
2223        if (!noulri && cpu_has_userlocal)
2224                hwrena |= MIPS_HWRENA_ULR;
2225
2226        if (hwrena)
2227                write_c0_hwrena(hwrena);
2228}
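
/*
 * Illustrative note (the assembly below describes typical user-space TLS
 * access, it is not emitted by this file): with MIPS_HWRENA_ULR set, user
 * space can read its thread pointer directly:
 *
 *	rdhwr	$3, $29		# hardware register 29 is UserLocal
 *
 * If the bit stays clear (e.g. when booting with "noulri"), the instruction
 * traps as a Reserved Instruction and is emulated by handle_ri_rdhwr.
 */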
2229
2230static void configure_exception_vector(void)
2231{
2232        if (cpu_has_mips_r2_r6) {
2233                unsigned long sr = set_c0_status(ST0_BEV);
2234                /* If available, use WG to set top bits of EBASE */
2235                if (cpu_has_ebase_wg) {
2236#ifdef CONFIG_64BIT
2237                        write_c0_ebase_64(ebase | MIPS_EBASE_WG);
2238#else
2239                        write_c0_ebase(ebase | MIPS_EBASE_WG);
2240#endif
2241                }
2242                write_c0_ebase(ebase);
2243                write_c0_status(sr);
2244        }
2245        if (cpu_has_veic || cpu_has_vint) {
2246                /* Setting vector spacing enables EI/VI mode  */
2247                change_c0_intctl(0x3e0, VECTORSPACING);
2248        }
2249        if (cpu_has_divec) {
2250                if (cpu_has_mipsmt) {
2251                        unsigned int vpflags = dvpe();
2252                        set_c0_cause(CAUSEF_IV);
2253                        evpe(vpflags);
2254                } else
2255                        set_c0_cause(CAUSEF_IV);
2256        }
2257}
2258
2259void per_cpu_trap_init(bool is_boot_cpu)
2260{
2261        unsigned int cpu = smp_processor_id();
2262
2263        configure_status();
2264        configure_hwrena();
2265
2266        configure_exception_vector();
2267
2268        /*
2269         * Before R2 both interrupt numbers were fixed to 7, so on R2 only:
2270         *
2271         *  o read IntCtl.IPTI to determine the timer interrupt
2272         *  o read IntCtl.IPPCI to determine the performance counter interrupt
2273         *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
2274         */
2275        if (cpu_has_mips_r2_r6) {
2276                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
2277                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
2278                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
2279                cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
2280                if (!cp0_fdc_irq)
2281                        cp0_fdc_irq = -1;
2282
2283        } else {
2284                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
2285                cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
2286                cp0_perfcount_irq = -1;
2287                cp0_fdc_irq = -1;
2288        }
2289
2290        if (cpu_has_mmid)
2291                cpu_data[cpu].asid_cache = 0;
2292        else if (!cpu_data[cpu].asid_cache)
2293                cpu_data[cpu].asid_cache = asid_first_version(cpu);
2294
2295        mmgrab(&init_mm);
2296        current->active_mm = &init_mm;
2297        BUG_ON(current->mm);
2298        enter_lazy_tlb(&init_mm, current);
2299
2300        /* Boot CPU's cache setup in setup_arch(). */
2301        if (!is_boot_cpu)
2302                cpu_cache_init();
2303        tlb_init();
2304        TLBMISS_HANDLER_SETUP();
2305}
2306
2307/* Install CPU exception handler */
2308void set_handler(unsigned long offset, void *addr, unsigned long size)
2309{
2310#ifdef CONFIG_CPU_MICROMIPS
2311        memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
2312#else
2313        memcpy((void *)(ebase + offset), addr, size);
2314#endif
2315        local_flush_icache_range(ebase + offset, ebase + offset + size);
2316}
2317
2318static const char panic_null_cerr[] =
2319        "Trying to set NULL cache error exception handler\n";
2320
2321/*
2322 * Install uncached CPU exception handler.
2323 * This is suitable only for the cache error exception which is the only
2324 * exception handler that is being run uncached.
2325 */
2326void set_uncached_handler(unsigned long offset, void *addr,
2327        unsigned long size)
2328{
2329        unsigned long uncached_ebase = CKSEG1ADDR(ebase);
2330
2331        if (!addr)
2332                panic(panic_null_cerr);
2333
2334        memcpy((void *)(uncached_ebase + offset), addr, size);
2335}
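
/*
 * Usage sketch (the vector symbol is hypothetical): a board_cache_error_setup()
 * implementation would typically copy its handler into the uncached mirror,
 * e.g.
 *
 *	set_uncached_handler(0x100, &my_except_vec2, 0x80);
 *
 * so the CPU can still fetch it through KSEG1 while the caches are suspect.
 */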
2336
2337static int __initdata rdhwr_noopt;
2338static int __init set_rdhwr_noopt(char *str)
2339{
2340        rdhwr_noopt = 1;
2341        return 1;
2342}
2343
2344__setup("rdhwr_noopt", set_rdhwr_noopt);
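
/*
 * Usage note: booting with "rdhwr_noopt" makes trap_init() below install
 * the plain handle_ri vector for Reserved Instruction exceptions instead
 * of the RDHWR-emulating handle_ri_rdhwr{,_tlbp} fast paths.
 */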
2345
2346void __init trap_init(void)
2347{
2348        extern char except_vec3_generic;
2349        extern char except_vec4;
2350        extern char except_vec3_r4000;
2351        unsigned long i, vec_size;
2352        phys_addr_t ebase_pa;
2353
2354        check_wait();
2355
2356        if (!cpu_has_mips_r2_r6) {
2357                ebase = CAC_BASE;
2358                vec_size = 0x400;
2359        } else {
2360                if (cpu_has_veic || cpu_has_vint)
2361                        vec_size = 0x200 + VECTORSPACING*64;
2362                else
2363                        vec_size = PAGE_SIZE;
2364
2365                ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
2366                if (!ebase_pa)
2367                        panic("%s: Failed to allocate %lu bytes align=0x%x\n",
2368                              __func__, vec_size, 1 << fls(vec_size));
2369
2370                /*
2371                 * Try to ensure ebase resides in KSeg0 if possible.
2372                 *
2373                 * It shouldn't generally be in XKPhys on MIPS64 to avoid
2374                 * hitting a poorly defined exception base for Cache Errors.
2375                 * The allocation is likely to be in the low 512MB of physical,
2376                 * in which case we should be able to convert to KSeg0.
2377                 *
2378                 * EVA is special though as it allows segments to be rearranged
2379                 * and to become uncached during cache error handling.
2380                 */
2381                if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
2382                        ebase = CKSEG0ADDR(ebase_pa);
2383                else
2384                        ebase = (unsigned long)phys_to_virt(ebase_pa);
2385        }
2386
2387        if (cpu_has_mmips) {
2388                unsigned int config3 = read_c0_config3();
2389
2390                if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
2391                        write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
2392                else
2393                        write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
2394        }
2395
2396        if (board_ebase_setup)
2397                board_ebase_setup();
2398        per_cpu_trap_init(true);
2399        memblock_set_bottom_up(false);
2400
2401        /*
2402         * Copy the generic exception handlers to their final destination.
2403         * This will be overridden later as suitable for a particular
2404         * configuration.
2405         */
2406        set_handler(0x180, &except_vec3_generic, 0x80);
2407
2408        /*
2409         * Setup default vectors
2410         */
2411        for (i = 0; i <= 31; i++)
2412                set_except_vector(i, handle_reserved);
2413
2414        /*
2415         * Copy the EJTAG debug exception vector handler code to its final
2416         * destination.
2417         */
2418        if (cpu_has_ejtag && board_ejtag_handler_setup)
2419                board_ejtag_handler_setup();
2420
2421        /*
2422         * Only some CPUs have the watch exceptions.
2423         */
2424        if (cpu_has_watch)
2425                set_except_vector(EXCCODE_WATCH, handle_watch);
2426
2427        /*
2428         * Initialise interrupt handlers
2429         */
2430        if (cpu_has_veic || cpu_has_vint) {
2431                int nvec = cpu_has_veic ? 64 : 8;
2432                for (i = 0; i < nvec; i++)
2433                        set_vi_handler(i, NULL);
2434        }
2435        else if (cpu_has_divec)
2436                set_handler(0x200, &except_vec4, 0x8);
2437
2438        /*
2439         * Some CPUs can enable/disable cache parity detection, but they do
2440         * it in different ways.
2441         */
2442        parity_protection_init();
2443
2444        /*
2445         * The Data Bus Errors / Instruction Bus Errors are signaled
2446         * by external hardware.  Therefore these two exceptions
2447         * may have board specific handlers.
2448         */
2449        if (board_be_init)
2450                board_be_init();
2451
2452        set_except_vector(EXCCODE_INT, using_rollback_handler() ?
2453                                        rollback_handle_int : handle_int);
2454        set_except_vector(EXCCODE_MOD, handle_tlbm);
2455        set_except_vector(EXCCODE_TLBL, handle_tlbl);
2456        set_except_vector(EXCCODE_TLBS, handle_tlbs);
2457
2458        set_except_vector(EXCCODE_ADEL, handle_adel);
2459        set_except_vector(EXCCODE_ADES, handle_ades);
2460
2461        set_except_vector(EXCCODE_IBE, handle_ibe);
2462        set_except_vector(EXCCODE_DBE, handle_dbe);
2463
2464        set_except_vector(EXCCODE_SYS, handle_sys);
2465        set_except_vector(EXCCODE_BP, handle_bp);
2466
2467        if (rdhwr_noopt)
2468                set_except_vector(EXCCODE_RI, handle_ri);
2469        else {
2470                if (cpu_has_vtag_icache)
2471                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2472                else if (current_cpu_type() == CPU_LOONGSON64)
2473                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
2474                else
2475                        set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
2476        }
2477
2478        set_except_vector(EXCCODE_CPU, handle_cpu);
2479        set_except_vector(EXCCODE_OV, handle_ov);
2480        set_except_vector(EXCCODE_TR, handle_tr);
2481        set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
2482
2483        if (board_nmi_handler_setup)
2484                board_nmi_handler_setup();
2485
2486        if (cpu_has_fpu && !cpu_has_nofpuex)
2487                set_except_vector(EXCCODE_FPE, handle_fpe);
2488
2489        if (cpu_has_ftlbparex)
2490                set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
2491
2492        if (cpu_has_gsexcex)
2493                set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
2494
2495        if (cpu_has_rixiex) {
2496                set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
2497                set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
2498        }
2499
2500        set_except_vector(EXCCODE_MSADIS, handle_msa);
2501        set_except_vector(EXCCODE_MDMX, handle_mdmx);
2502
2503        if (cpu_has_mcheck)
2504                set_except_vector(EXCCODE_MCHECK, handle_mcheck);
2505
2506        if (cpu_has_mipsmt)
2507                set_except_vector(EXCCODE_THREAD, handle_mt);
2508
2509        set_except_vector(EXCCODE_DSPDIS, handle_dsp);
2510
2511        if (board_cache_error_setup)
2512                board_cache_error_setup();
2513
2514        if (cpu_has_vce)
2515                /* Special exception: R4[04]00 uses also the divec space. */
2516                set_handler(0x180, &except_vec3_r4000, 0x100);
2517        else if (cpu_has_4kex)
2518                set_handler(0x180, &except_vec3_generic, 0x80);
2519        else
2520                set_handler(0x080, &except_vec3_generic, 0x80);
2521
2522        local_flush_icache_range(ebase, ebase + vec_size);
2523
2524        sort_extable(__start___dbe_table, __stop___dbe_table);
2525
2526        cu2_notifier(default_cu2_call, 0x80000000);     /* Run last  */
2527}
2528
2529static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
2530                            void *v)
2531{
2532        switch (cmd) {
2533        case CPU_PM_ENTER_FAILED:
2534        case CPU_PM_EXIT:
2535                configure_status();
2536                configure_hwrena();
2537                configure_exception_vector();
2538
2539                /* Restore register with CPU number for TLB handlers */
2540                TLBMISS_HANDLER_RESTORE();
2541
2542                break;
2543        }
2544
2545        return NOTIFY_OK;
2546}
2547
2548static struct notifier_block trap_pm_notifier_block = {
2549        .notifier_call = trap_pm_notifier,
2550};
2551
2552static int __init trap_pm_init(void)
2553{
2554        return cpu_pm_register_notifier(&trap_pm_notifier_block);
2555}
2556arch_initcall(trap_pm_init);
2557