linux/arch/powerpc/kernel/traps.c
   1/*
   2 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
   3 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
   4 *
   5 *  This program is free software; you can redistribute it and/or
   6 *  modify it under the terms of the GNU General Public License
   7 *  as published by the Free Software Foundation; either version
   8 *  2 of the License, or (at your option) any later version.
   9 *
  10 *  Modified by Cort Dougan (cort@cs.nmt.edu)
  11 *  and Paul Mackerras (paulus@samba.org)
  12 */
  13
  14/*
  15 * This file handles the architecture-dependent parts of hardware exceptions
  16 */
  17
  18#include <linux/errno.h>
  19#include <linux/sched.h>
  20#include <linux/sched/debug.h>
  21#include <linux/kernel.h>
  22#include <linux/mm.h>
  23#include <linux/pkeys.h>
  24#include <linux/stddef.h>
  25#include <linux/unistd.h>
  26#include <linux/ptrace.h>
  27#include <linux/user.h>
  28#include <linux/interrupt.h>
  29#include <linux/init.h>
  30#include <linux/extable.h>
  31#include <linux/module.h>       /* print_modules */
  32#include <linux/prctl.h>
  33#include <linux/delay.h>
  34#include <linux/kprobes.h>
  35#include <linux/kexec.h>
  36#include <linux/backlight.h>
  37#include <linux/bug.h>
  38#include <linux/kdebug.h>
  39#include <linux/ratelimit.h>
  40#include <linux/context_tracking.h>
  41#include <linux/smp.h>
  42#include <linux/console.h>
  43#include <linux/kmsg_dump.h>
  44
  45#include <asm/emulated_ops.h>
  46#include <asm/pgtable.h>
  47#include <linux/uaccess.h>
  48#include <asm/debugfs.h>
  49#include <asm/io.h>
  50#include <asm/machdep.h>
  51#include <asm/rtas.h>
  52#include <asm/pmc.h>
  53#include <asm/reg.h>
  54#ifdef CONFIG_PMAC_BACKLIGHT
  55#include <asm/backlight.h>
  56#endif
  57#ifdef CONFIG_PPC64
  58#include <asm/firmware.h>
  59#include <asm/processor.h>
  60#include <asm/tm.h>
  61#endif
  62#include <asm/kexec.h>
  63#include <asm/ppc-opcode.h>
  64#include <asm/rio.h>
  65#include <asm/fadump.h>
  66#include <asm/switch_to.h>
  67#include <asm/tm.h>
  68#include <asm/debug.h>
  69#include <asm/asm-prototypes.h>
  70#include <asm/hmi.h>
  71#include <sysdev/fsl_pci.h>
  72#include <asm/kprobes.h>
  73
  74#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
  75int (*__debugger)(struct pt_regs *regs) __read_mostly;
  76int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
  77int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
  78int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
  79int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
  80int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
  81int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
  82
  83EXPORT_SYMBOL(__debugger);
  84EXPORT_SYMBOL(__debugger_ipi);
  85EXPORT_SYMBOL(__debugger_bpt);
  86EXPORT_SYMBOL(__debugger_sstep);
  87EXPORT_SYMBOL(__debugger_iabr_match);
  88EXPORT_SYMBOL(__debugger_break_match);
  89EXPORT_SYMBOL(__debugger_fault_handler);
  90#endif
  91
  92/* Transactional Memory trap debug */
  93#ifdef TM_DEBUG_SW
  94#define TM_DEBUG(x...) printk(KERN_INFO x)
  95#else
  96#define TM_DEBUG(x...) do { } while(0)
  97#endif
  98
  99/*
 100 * Trap & Exception support
 101 */
 102
 103#ifdef CONFIG_PMAC_BACKLIGHT
 104static void pmac_backlight_unblank(void)
 105{
 106        mutex_lock(&pmac_backlight_mutex);
 107        if (pmac_backlight) {
 108                struct backlight_properties *props;
 109
 110                props = &pmac_backlight->props;
 111                props->brightness = props->max_brightness;
 112                props->power = FB_BLANK_UNBLANK;
 113                backlight_update_status(pmac_backlight);
 114        }
 115        mutex_unlock(&pmac_backlight_mutex);
 116}
 117#else
 118static inline void pmac_backlight_unblank(void) { }
 119#endif
 120
 121/*
 122 * If oops/die is expected to crash the machine, return true here.
 123 *
 124 * This should not be expected to be 100% accurate, there may be
 125 * notifiers registered or other unexpected conditions that may bring
 126 * down the kernel. Or if the current process in the kernel is holding
 127 * locks or has other critical state, the kernel may become effectively
 128 * unusable anyway.
 129 */
 130bool die_will_crash(void)
 131{
 132        if (should_fadump_crash())
 133                return true;
 134        if (kexec_should_crash(current))
 135                return true;
 136        if (in_interrupt() || panic_on_oops ||
 137                        !current->pid || is_global_init(current))
 138                return true;
 139
 140        return false;
 141}
 142
 143static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 144static int die_owner = -1;
 145static unsigned int die_nest_count;
 146static int die_counter;
 147
 148extern void panic_flush_kmsg_start(void)
 149{
 150        /*
  151         * These are mostly taken from kernel/panic.c, but try to do
 152         * relatively minimal work. Don't use delay functions (TB may
 153         * be broken), don't crash dump (need to set a firmware log),
 154         * don't run notifiers. We do want to get some information to
 155         * Linux console.
 156         */
 157        console_verbose();
 158        bust_spinlocks(1);
 159}
 160
 161extern void panic_flush_kmsg_end(void)
 162{
 163        printk_safe_flush_on_panic();
 164        kmsg_dump(KMSG_DUMP_PANIC);
 165        bust_spinlocks(0);
 166        debug_locks_off();
 167        console_flush_on_panic();
 168}
 169
 170static unsigned long oops_begin(struct pt_regs *regs)
 171{
 172        int cpu;
 173        unsigned long flags;
 174
 175        oops_enter();
 176
 177        /* racy, but better than risking deadlock. */
 178        raw_local_irq_save(flags);
 179        cpu = smp_processor_id();
 180        if (!arch_spin_trylock(&die_lock)) {
 181                if (cpu == die_owner)
 182                        /* nested oops. should stop eventually */;
 183                else
 184                        arch_spin_lock(&die_lock);
 185        }
 186        die_nest_count++;
 187        die_owner = cpu;
 188        console_verbose();
 189        bust_spinlocks(1);
 190        if (machine_is(powermac))
 191                pmac_backlight_unblank();
 192        return flags;
 193}
 194NOKPROBE_SYMBOL(oops_begin);
 195
 196static void oops_end(unsigned long flags, struct pt_regs *regs,
 197                               int signr)
 198{
 199        bust_spinlocks(0);
 200        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 201        die_nest_count--;
 202        oops_exit();
 203        printk("\n");
 204        if (!die_nest_count) {
 205                /* Nest count reaches zero, release the lock. */
 206                die_owner = -1;
 207                arch_spin_unlock(&die_lock);
 208        }
 209        raw_local_irq_restore(flags);
 210
 211        /*
  212         * system_reset_exception handles debugger, crash dump, panic, for 0x100
 213         */
 214        if (TRAP(regs) == 0x100)
 215                return;
 216
 217        crash_fadump(regs, "die oops");
 218
 219        if (kexec_should_crash(current))
 220                crash_kexec(regs);
 221
 222        if (!signr)
 223                return;
 224
 225        /*
 226         * While our oops output is serialised by a spinlock, output
 227         * from panic() called below can race and corrupt it. If we
 228         * know we are going to panic, delay for 1 second so we have a
 229         * chance to get clean backtraces from all CPUs that are oopsing.
 230         */
 231        if (in_interrupt() || panic_on_oops || !current->pid ||
 232            is_global_init(current)) {
 233                mdelay(MSEC_PER_SEC);
 234        }
 235
 236        if (in_interrupt())
 237                panic("Fatal exception in interrupt");
 238        if (panic_on_oops)
 239                panic("Fatal exception");
 240        do_exit(signr);
 241}
 242NOKPROBE_SYMBOL(oops_end);
 243
 244static int __die(const char *str, struct pt_regs *regs, long err)
 245{
 246        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
 247
 248        if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
 249                printk("LE ");
 250        else
 251                printk("BE ");
 252
 253        if (IS_ENABLED(CONFIG_PREEMPT))
 254                pr_cont("PREEMPT ");
 255
 256        if (IS_ENABLED(CONFIG_SMP))
 257                pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
 258
 259        if (debug_pagealloc_enabled())
 260                pr_cont("DEBUG_PAGEALLOC ");
 261
 262        if (IS_ENABLED(CONFIG_NUMA))
 263                pr_cont("NUMA ");
 264
 265        pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
 266
 267        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
 268                return 1;
 269
 270        print_modules();
 271        show_regs(regs);
 272
 273        return 0;
 274}
 275NOKPROBE_SYMBOL(__die);
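/*
 * For illustration, the banner printed by __die() might look roughly like
 * the following (the machine name and the flag list vary with the config
 * and platform):
 *
 *   Oops: Kernel access of bad area, sig: 11 [#1]
 *   LE SMP NR_CPUS=2048 NUMA PowerNV
 */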
 276
 277void die(const char *str, struct pt_regs *regs, long err)
 278{
 279        unsigned long flags;
 280
 281        /*
  282         * system_reset_exception handles debugger, crash dump, panic, for 0x100
 283         */
 284        if (TRAP(regs) != 0x100) {
 285                if (debugger(regs))
 286                        return;
 287        }
 288
 289        flags = oops_begin(regs);
 290        if (__die(str, regs, err))
 291                err = 0;
 292        oops_end(flags, regs, err);
 293}
 294NOKPROBE_SYMBOL(die);
 295
 296void user_single_step_siginfo(struct task_struct *tsk,
 297                                struct pt_regs *regs, siginfo_t *info)
 298{
 299        info->si_signo = SIGTRAP;
 300        info->si_code = TRAP_TRACE;
 301        info->si_addr = (void __user *)regs->nip;
 302}
 303
 304
 305void _exception_pkey(int signr, struct pt_regs *regs, int code,
 306                unsigned long addr, int key)
 307{
 308        siginfo_t info;
 309        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 310                        "at %08lx nip %08lx lr %08lx code %x\n";
 311        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 312                        "at %016lx nip %016lx lr %016lx code %x\n";
 313
 314        if (!user_mode(regs)) {
 315                die("Exception in kernel mode", regs, signr);
 316                return;
 317        }
 318
 319        if (show_unhandled_signals && unhandled_signal(current, signr)) {
 320                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
 321                                   current->comm, current->pid, signr,
 322                                   addr, regs->nip, regs->link, code);
 323        }
 324
 325        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 326                local_irq_enable();
 327
 328        current->thread.trap_nr = code;
 329
 330        /*
 331         * Save all the pkey registers AMR/IAMR/UAMOR. Eg: Core dumps need
 332         * to capture the content, if the task gets killed.
 333         */
 334        thread_pkey_regs_save(&current->thread);
 335
 336        clear_siginfo(&info);
 337        info.si_signo = signr;
 338        info.si_code = code;
 339        info.si_addr = (void __user *) addr;
 340        info.si_pkey = key;
 341
 342        force_sig_info(signr, &info, current);
 343}
 344
 345void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 346{
 347        _exception_pkey(signr, regs, code, addr, 0);
 348}
 349
 350void system_reset_exception(struct pt_regs *regs)
 351{
 352        /*
 353         * Avoid crashes in case of nested NMI exceptions. Recoverability
 354         * is determined by RI and in_nmi
 355         */
 356        bool nested = in_nmi();
 357        if (!nested)
 358                nmi_enter();
 359
 360        __this_cpu_inc(irq_stat.sreset_irqs);
 361
  362        /* See if any machine-dependent code wants to handle this */
 363        if (ppc_md.system_reset_exception) {
 364                if (ppc_md.system_reset_exception(regs))
 365                        goto out;
 366        }
 367
 368        if (debugger(regs))
 369                goto out;
 370
 371        /*
 372         * A system reset is a request to dump, so we always send
 373         * it through the crashdump code (if fadump or kdump are
 374         * registered).
 375         */
 376        crash_fadump(regs, "System Reset");
 377
 378        crash_kexec(regs);
 379
 380        /*
 381         * We aren't the primary crash CPU. We need to send it
 382         * to a holding pattern to avoid it ending up in the panic
 383         * code.
 384         */
 385        crash_kexec_secondary(regs);
 386
 387        /*
 388         * No debugger or crash dump registered, print logs then
 389         * panic.
 390         */
 391        die("System Reset", regs, SIGABRT);
 392
 393        mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
 394        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 395        nmi_panic(regs, "System Reset");
 396
 397out:
 398#ifdef CONFIG_PPC_BOOK3S_64
 399        BUG_ON(get_paca()->in_nmi == 0);
 400        if (get_paca()->in_nmi > 1)
 401                nmi_panic(regs, "Unrecoverable nested System Reset");
 402#endif
 403        /* Must die if the interrupt is not recoverable */
 404        if (!(regs->msr & MSR_RI))
 405                nmi_panic(regs, "Unrecoverable System Reset");
 406
 407        if (!nested)
 408                nmi_exit();
 409
 410        /* What should we do here? We could issue a shutdown or hard reset. */
 411}
 412
 413/*
 414 * I/O accesses can cause machine checks on powermacs.
 415 * Check if the NIP corresponds to the address of a sync
 416 * instruction for which there is an entry in the exception
 417 * table.
 418 * Note that the 601 only takes a machine check on TEA
 419 * (transfer error ack) signal assertion, and does not
 420 * set any of the top 16 bits of SRR1.
 421 *  -- paulus.
 422 */
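/*
 * Worked example of the fixup below (illustrative; the exact sequence
 * depends on the I/O accessor implementation in asm/io.h): inb() ends
 * with a "twi; isync; nop" tail after the actual X-form load, so the
 * machine check is taken with NIP somewhere inside that tail.  We step
 * back to the load itself, pull RB out of the instruction to recover
 * the port address, and use instruction bit 0x100, the bit that
 * separates the store extended opcodes from the load ones (e.g. stbx
 * vs lbzx), to print "OUT to" versus "IN from".
 */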
 423static inline int check_io_access(struct pt_regs *regs)
 424{
 425#ifdef CONFIG_PPC32
 426        unsigned long msr = regs->msr;
 427        const struct exception_table_entry *entry;
 428        unsigned int *nip = (unsigned int *)regs->nip;
 429
 430        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
 431            && (entry = search_exception_tables(regs->nip)) != NULL) {
 432                /*
 433                 * Check that it's a sync instruction, or somewhere
 434                 * in the twi; isync; nop sequence that inb/inw/inl uses.
 435                 * As the address is in the exception table
 436                 * we should be able to read the instr there.
 437                 * For the debug message, we look at the preceding
 438                 * load or store.
 439                 */
 440                if (*nip == PPC_INST_NOP)
 441                        nip -= 2;
 442                else if (*nip == PPC_INST_ISYNC)
 443                        --nip;
 444                if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
 445                        unsigned int rb;
 446
 447                        --nip;
 448                        rb = (*nip >> 11) & 0x1f;
 449                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
 450                               (*nip & 0x100)? "OUT to": "IN from",
 451                               regs->gpr[rb] - _IO_BASE, nip);
 452                        regs->msr |= MSR_RI;
 453                        regs->nip = extable_fixup(entry);
 454                        return 1;
 455                }
 456        }
 457#endif /* CONFIG_PPC32 */
 458        return 0;
 459}
 460
 461#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 462/* On 4xx, the reason for the machine check or program exception
 463   is in the ESR. */
 464#define get_reason(regs)        ((regs)->dsisr)
 465#define REASON_FP               ESR_FP
 466#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
 467#define REASON_PRIVILEGED       ESR_PPR
 468#define REASON_TRAP             ESR_PTR
 469
 470/* single-step stuff */
 471#define single_stepping(regs)   (current->thread.debug.dbcr0 & DBCR0_IC)
 472#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
 473#define clear_br_trace(regs)    do {} while(0)
 474#else
 475/* On non-4xx, the reason for the machine check or program
 476   exception is in the MSR. */
 477#define get_reason(regs)        ((regs)->msr)
 478#define REASON_TM               SRR1_PROGTM
 479#define REASON_FP               SRR1_PROGFPE
 480#define REASON_ILLEGAL          SRR1_PROGILL
 481#define REASON_PRIVILEGED       SRR1_PROGPRIV
 482#define REASON_TRAP             SRR1_PROGTRAP
 483
 484#define single_stepping(regs)   ((regs)->msr & MSR_SE)
 485#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
 486#define clear_br_trace(regs)    ((regs)->msr &= ~MSR_BE)
 487#endif
 488
 489#if defined(CONFIG_E500)
 490int machine_check_e500mc(struct pt_regs *regs)
 491{
 492        unsigned long mcsr = mfspr(SPRN_MCSR);
 493        unsigned long pvr = mfspr(SPRN_PVR);
 494        unsigned long reason = mcsr;
 495        int recoverable = 1;
 496
 497        if (reason & MCSR_LD) {
 498                recoverable = fsl_rio_mcheck_exception(regs);
 499                if (recoverable == 1)
 500                        goto silent_out;
 501        }
 502
 503        printk("Machine check in kernel mode.\n");
 504        printk("Caused by (from MCSR=%lx): ", reason);
 505
 506        if (reason & MCSR_MCP)
 507                printk("Machine Check Signal\n");
 508
 509        if (reason & MCSR_ICPERR) {
 510                printk("Instruction Cache Parity Error\n");
 511
 512                /*
 513                 * This is recoverable by invalidating the i-cache.
 514                 */
 515                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
 516                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
 517                        ;
 518
 519                /*
 520                 * This will generally be accompanied by an instruction
 521                 * fetch error report -- only treat MCSR_IF as fatal
 522                 * if it wasn't due to an L1 parity error.
 523                 */
 524                reason &= ~MCSR_IF;
 525        }
 526
 527        if (reason & MCSR_DCPERR_MC) {
 528                printk("Data Cache Parity Error\n");
 529
 530                /*
 531                 * In write shadow mode we auto-recover from the error, but it
 532                 * may still get logged and cause a machine check.  We should
 533                 * only treat the non-write shadow case as non-recoverable.
 534                 */
 535                /* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
 536                 * is not implemented but L1 data cache always runs in write
 537                 * shadow mode. Hence on data cache parity errors HW will
 538                 * automatically invalidate the L1 Data Cache.
 539                 */
 540                if (PVR_VER(pvr) != PVR_VER_E6500) {
 541                        if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
 542                                recoverable = 0;
 543                }
 544        }
 545
 546        if (reason & MCSR_L2MMU_MHIT) {
 547                printk("Hit on multiple TLB entries\n");
 548                recoverable = 0;
 549        }
 550
 551        if (reason & MCSR_NMI)
 552                printk("Non-maskable interrupt\n");
 553
 554        if (reason & MCSR_IF) {
 555                printk("Instruction Fetch Error Report\n");
 556                recoverable = 0;
 557        }
 558
 559        if (reason & MCSR_LD) {
 560                printk("Load Error Report\n");
 561                recoverable = 0;
 562        }
 563
 564        if (reason & MCSR_ST) {
 565                printk("Store Error Report\n");
 566                recoverable = 0;
 567        }
 568
 569        if (reason & MCSR_LDG) {
 570                printk("Guarded Load Error Report\n");
 571                recoverable = 0;
 572        }
 573
 574        if (reason & MCSR_TLBSYNC)
 575                printk("Simultaneous tlbsync operations\n");
 576
 577        if (reason & MCSR_BSL2_ERR) {
 578                printk("Level 2 Cache Error\n");
 579                recoverable = 0;
 580        }
 581
 582        if (reason & MCSR_MAV) {
 583                u64 addr;
 584
 585                addr = mfspr(SPRN_MCAR);
 586                addr |= (u64)mfspr(SPRN_MCARU) << 32;
 587
 588                printk("Machine Check %s Address: %#llx\n",
 589                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
 590        }
 591
 592silent_out:
 593        mtspr(SPRN_MCSR, mcsr);
 594        return mfspr(SPRN_MCSR) == 0 && recoverable;
 595}
 596
 597int machine_check_e500(struct pt_regs *regs)
 598{
 599        unsigned long reason = mfspr(SPRN_MCSR);
 600
 601        if (reason & MCSR_BUS_RBERR) {
 602                if (fsl_rio_mcheck_exception(regs))
 603                        return 1;
 604                if (fsl_pci_mcheck_exception(regs))
 605                        return 1;
 606        }
 607
 608        printk("Machine check in kernel mode.\n");
 609        printk("Caused by (from MCSR=%lx): ", reason);
 610
 611        if (reason & MCSR_MCP)
 612                printk("Machine Check Signal\n");
 613        if (reason & MCSR_ICPERR)
 614                printk("Instruction Cache Parity Error\n");
 615        if (reason & MCSR_DCP_PERR)
 616                printk("Data Cache Push Parity Error\n");
 617        if (reason & MCSR_DCPERR)
 618                printk("Data Cache Parity Error\n");
 619        if (reason & MCSR_BUS_IAERR)
 620                printk("Bus - Instruction Address Error\n");
 621        if (reason & MCSR_BUS_RAERR)
 622                printk("Bus - Read Address Error\n");
 623        if (reason & MCSR_BUS_WAERR)
 624                printk("Bus - Write Address Error\n");
 625        if (reason & MCSR_BUS_IBERR)
 626                printk("Bus - Instruction Data Error\n");
 627        if (reason & MCSR_BUS_RBERR)
 628                printk("Bus - Read Data Bus Error\n");
 629        if (reason & MCSR_BUS_WBERR)
 630                printk("Bus - Write Data Bus Error\n");
 631        if (reason & MCSR_BUS_IPERR)
 632                printk("Bus - Instruction Parity Error\n");
 633        if (reason & MCSR_BUS_RPERR)
 634                printk("Bus - Read Parity Error\n");
 635
 636        return 0;
 637}
 638
 639int machine_check_generic(struct pt_regs *regs)
 640{
 641        return 0;
 642}
 643#elif defined(CONFIG_E200)
 644int machine_check_e200(struct pt_regs *regs)
 645{
 646        unsigned long reason = mfspr(SPRN_MCSR);
 647
 648        printk("Machine check in kernel mode.\n");
 649        printk("Caused by (from MCSR=%lx): ", reason);
 650
 651        if (reason & MCSR_MCP)
 652                printk("Machine Check Signal\n");
 653        if (reason & MCSR_CP_PERR)
 654                printk("Cache Push Parity Error\n");
 655        if (reason & MCSR_CPERR)
 656                printk("Cache Parity Error\n");
 657        if (reason & MCSR_EXCP_ERR)
 658                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
 659        if (reason & MCSR_BUS_IRERR)
 660                printk("Bus - Read Bus Error on instruction fetch\n");
 661        if (reason & MCSR_BUS_DRERR)
 662                printk("Bus - Read Bus Error on data load\n");
 663        if (reason & MCSR_BUS_WRERR)
 664                printk("Bus - Write Bus Error on buffered store or cache line push\n");
 665
 666        return 0;
 667}
 668#elif defined(CONFIG_PPC32)
 669int machine_check_generic(struct pt_regs *regs)
 670{
 671        unsigned long reason = regs->msr;
 672
 673        printk("Machine check in kernel mode.\n");
 674        printk("Caused by (from SRR1=%lx): ", reason);
 675        switch (reason & 0x601F0000) {
 676        case 0x80000:
 677                printk("Machine check signal\n");
 678                break;
 679        case 0:         /* for 601 */
 680        case 0x40000:
 681        case 0x140000:  /* 7450 MSS error and TEA */
 682                printk("Transfer error ack signal\n");
 683                break;
 684        case 0x20000:
 685                printk("Data parity error signal\n");
 686                break;
 687        case 0x10000:
 688                printk("Address parity error signal\n");
 689                break;
 690        case 0x20000000:
 691                printk("L1 Data Cache error\n");
 692                break;
 693        case 0x40000000:
 694                printk("L1 Instruction Cache error\n");
 695                break;
 696        case 0x00100000:
 697                printk("L2 data cache parity error\n");
 698                break;
 699        default:
 700                printk("Unknown values in msr\n");
 701        }
 702        return 0;
 703}
 704#endif /* everything else */
 705
 706void machine_check_exception(struct pt_regs *regs)
 707{
 708        int recover = 0;
 709        bool nested = in_nmi();
 710        if (!nested)
 711                nmi_enter();
 712
 713        /* 64s accounts the mce in machine_check_early when in HVMODE */
 714        if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
 715                __this_cpu_inc(irq_stat.mce_exceptions);
 716
 717        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
 718
 719        /* See if any machine dependent calls. In theory, we would want
 720         * to call the CPU first, and call the ppc_md. one if the CPU
 721         * one returns a positive number. However there is existing code
 722         * that assumes the board gets a first chance, so let's keep it
 723         * that way for now and fix things later. --BenH.
 724         */
 725        if (ppc_md.machine_check_exception)
 726                recover = ppc_md.machine_check_exception(regs);
 727        else if (cur_cpu_spec->machine_check)
 728                recover = cur_cpu_spec->machine_check(regs);
 729
 730        if (recover > 0)
 731                goto bail;
 732
 733        if (debugger_fault_handler(regs))
 734                goto bail;
 735
 736        if (check_io_access(regs))
 737                goto bail;
 738
 739        die("Machine check", regs, SIGBUS);
 740
 741        /* Must die if the interrupt is not recoverable */
 742        if (!(regs->msr & MSR_RI))
 743                nmi_panic(regs, "Unrecoverable Machine check");
 744
 745bail:
 746        if (!nested)
 747                nmi_exit();
 748}
 749
 750void SMIException(struct pt_regs *regs)
 751{
 752        die("System Management Interrupt", regs, SIGABRT);
 753}
 754
 755#ifdef CONFIG_VSX
 756static void p9_hmi_special_emu(struct pt_regs *regs)
 757{
 758        unsigned int ra, rb, t, i, sel, instr, rc;
 759        const void __user *addr;
 760        u8 vbuf[16], *vdst;
 761        unsigned long ea, msr, msr_mask;
 762        bool swap;
 763
 764        if (__get_user_inatomic(instr, (unsigned int __user *)regs->nip))
 765                return;
 766
 767        /*
 768         * lxvb16x      opcode: 0x7c0006d8
 769         * lxvd2x       opcode: 0x7c000698
 770         * lxvh8x       opcode: 0x7c000658
 771         * lxvw4x       opcode: 0x7c000618
 772         */
 773        if ((instr & 0xfc00073e) != 0x7c000618) {
 774                pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
 775                         " instr=%08x\n",
 776                         smp_processor_id(), current->comm, current->pid,
 777                         regs->nip, instr);
 778                return;
 779        }
 780
 781        /* Grab vector registers into the task struct */
 782        msr = regs->msr; /* Grab msr before we flush the bits */
 783        flush_vsx_to_thread(current);
 784        enable_kernel_altivec();
 785
 786        /*
  787         * Is userspace running with a different endianness? (This is rare
  788         * but not impossible.)
 789         */
 790        swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);
 791
 792        /* Decode the instruction */
 793        ra = (instr >> 16) & 0x1f;
 794        rb = (instr >> 11) & 0x1f;
 795        t = (instr >> 21) & 0x1f;
 796        if (instr & 1)
 797                vdst = (u8 *)&current->thread.vr_state.vr[t];
 798        else
 799                vdst = (u8 *)&current->thread.fp_state.fpr[t][0];
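        /*
         * Note on the decode above: bit 0 of the instruction is the TX
         * bit, so target registers 32-63 live in the VMX save area
         * (vr_state) while targets 0-31 overlay the FPRs (fp_state),
         * matching the VSX register file layout.
         */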
 800
 801        /* Grab the vector address */
 802        ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
 803        if (is_32bit_task())
 804                ea &= 0xfffffffful;
 805        addr = (__force const void __user *)ea;
 806
 807        /* Check it */
 808        if (!access_ok(VERIFY_READ, addr, 16)) {
 809                pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
 810                         " instr=%08x addr=%016lx\n",
 811                         smp_processor_id(), current->comm, current->pid,
 812                         regs->nip, instr, (unsigned long)addr);
 813                return;
 814        }
 815
 816        /* Read the vector */
 817        rc = 0;
 818        if ((unsigned long)addr & 0xfUL)
 819                /* unaligned case */
 820                rc = __copy_from_user_inatomic(vbuf, addr, 16);
 821        else
 822                __get_user_atomic_128_aligned(vbuf, addr, rc);
 823        if (rc) {
 824                pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
 825                         " instr=%08x addr=%016lx\n",
 826                         smp_processor_id(), current->comm, current->pid,
 827                         regs->nip, instr, (unsigned long)addr);
 828                return;
 829        }
 830
 831        pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
 832                 " instr=%08x addr=%016lx\n",
 833                 smp_processor_id(), current->comm, current->pid, regs->nip,
 834                 instr, (unsigned long) addr);
 835
 836        /* Grab instruction "selector" */
 837        sel = (instr >> 6) & 3;
 838
 839        /*
 840         * Check to make sure the facility is actually enabled. This
 841         * could happen if we get a false positive hit.
 842         *
 843         * lxvd2x/lxvw4x always check MSR VSX sel = 0,2
 844         * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used sel = 1,3
 845         */
 846        msr_mask = MSR_VSX;
 847        if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
 848                msr_mask = MSR_VEC;
 849        if (!(msr & msr_mask)) {
 850                pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
 851                         " instr=%08x msr:%016lx\n",
 852                         smp_processor_id(), current->comm, current->pid,
 853                         regs->nip, instr, msr);
 854                return;
 855        }
 856
 857        /* Do logging here before we modify sel based on endian */
 858        switch (sel) {
 859        case 0: /* lxvw4x */
 860                PPC_WARN_EMULATED(lxvw4x, regs);
 861                break;
 862        case 1: /* lxvh8x */
 863                PPC_WARN_EMULATED(lxvh8x, regs);
 864                break;
 865        case 2: /* lxvd2x */
 866                PPC_WARN_EMULATED(lxvd2x, regs);
 867                break;
 868        case 3: /* lxvb16x */
 869                PPC_WARN_EMULATED(lxvb16x, regs);
 870                break;
 871        }
 872
 873#ifdef __LITTLE_ENDIAN__
 874        /*
 875         * An LE kernel stores the vector in the task struct as an LE
 876         * byte array (effectively swapping both the components and
 877         * the content of the components). Those instructions expect
 878         * the components to remain in ascending address order, so we
 879         * swap them back.
 880         *
 881         * If we are running a BE user space, the expectation is that
 882         * of a simple memcpy, so forcing the emulation to look like
 883         * a lxvb16x should do the trick.
 884         */
 885        if (swap)
 886                sel = 3;
 887
 888        switch (sel) {
 889        case 0: /* lxvw4x */
 890                for (i = 0; i < 4; i++)
 891                        ((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
 892                break;
 893        case 1: /* lxvh8x */
 894                for (i = 0; i < 8; i++)
 895                        ((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
 896                break;
 897        case 2: /* lxvd2x */
 898                for (i = 0; i < 2; i++)
 899                        ((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
 900                break;
 901        case 3: /* lxvb16x */
 902                for (i = 0; i < 16; i++)
 903                        vdst[i] = vbuf[15-i];
 904                break;
 905        }
 906#else /* __LITTLE_ENDIAN__ */
 907        /* On a big endian kernel, a BE userspace only needs a memcpy */
 908        if (!swap)
 909                sel = 3;
 910
 911        /* Otherwise, we need to swap the content of the components */
 912        switch (sel) {
 913        case 0: /* lxvw4x */
 914                for (i = 0; i < 4; i++)
 915                        ((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
 916                break;
 917        case 1: /* lxvh8x */
 918                for (i = 0; i < 8; i++)
 919                        ((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
 920                break;
 921        case 2: /* lxvd2x */
 922                for (i = 0; i < 2; i++)
 923                        ((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
 924                break;
 925        case 3: /* lxvb16x */
 926                memcpy(vdst, vbuf, 16);
 927                break;
 928        }
 929#endif /* !__LITTLE_ENDIAN__ */
 930
 931        /* Go to next instruction */
 932        regs->nip += 4;
 933}
 934#endif /* CONFIG_VSX */
 935
 936void handle_hmi_exception(struct pt_regs *regs)
 937{
 938        struct pt_regs *old_regs;
 939
 940        old_regs = set_irq_regs(regs);
 941        irq_enter();
 942
 943#ifdef CONFIG_VSX
  944        /* The real-mode handler flagged that P9 special emulation is needed */
 945        if (local_paca->hmi_p9_special_emu) {
 946                local_paca->hmi_p9_special_emu = 0;
 947
 948                /*
 949                 * We don't want to take page faults while doing the
  950                 * emulation; we just replay the instruction if necessary.
 951                 */
 952                pagefault_disable();
 953                p9_hmi_special_emu(regs);
 954                pagefault_enable();
 955        }
 956#endif /* CONFIG_VSX */
 957
 958        if (ppc_md.handle_hmi_exception)
 959                ppc_md.handle_hmi_exception(regs);
 960
 961        irq_exit();
 962        set_irq_regs(old_regs);
 963}
 964
 965void unknown_exception(struct pt_regs *regs)
 966{
 967        enum ctx_state prev_state = exception_enter();
 968
 969        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
 970               regs->nip, regs->msr, regs->trap);
 971
 972        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 973
 974        exception_exit(prev_state);
 975}
 976
 977void instruction_breakpoint_exception(struct pt_regs *regs)
 978{
 979        enum ctx_state prev_state = exception_enter();
 980
 981        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
 982                                        5, SIGTRAP) == NOTIFY_STOP)
 983                goto bail;
 984        if (debugger_iabr_match(regs))
 985                goto bail;
 986        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 987
 988bail:
 989        exception_exit(prev_state);
 990}
 991
 992void RunModeException(struct pt_regs *regs)
 993{
 994        _exception(SIGTRAP, regs, TRAP_UNK, 0);
 995}
 996
 997void single_step_exception(struct pt_regs *regs)
 998{
 999        enum ctx_state prev_state = exception_enter();
1000
1001        clear_single_step(regs);
1002        clear_br_trace(regs);
1003
1004        if (kprobe_post_handler(regs))
1005                return;
1006
1007        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1008                                        5, SIGTRAP) == NOTIFY_STOP)
1009                goto bail;
1010        if (debugger_sstep(regs))
1011                goto bail;
1012
1013        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1014
1015bail:
1016        exception_exit(prev_state);
1017}
1018NOKPROBE_SYMBOL(single_step_exception);
1019
1020/*
1021 * After we have successfully emulated an instruction, we have to
1022 * check if the instruction was being single-stepped, and if so,
1023 * pretend we got a single-step exception.  This was pointed out
1024 * by Kumar Gala.  -- paulus
1025 */
1026static void emulate_single_step(struct pt_regs *regs)
1027{
1028        if (single_stepping(regs))
1029                single_step_exception(regs);
1030}
1031
1032static inline int __parse_fpscr(unsigned long fpscr)
1033{
1034        int ret = FPE_FLTUNK;
1035
1036        /* Invalid operation */
1037        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
1038                ret = FPE_FLTINV;
1039
1040        /* Overflow */
1041        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
1042                ret = FPE_FLTOVF;
1043
1044        /* Underflow */
1045        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
1046                ret = FPE_FLTUND;
1047
1048        /* Divide by zero */
1049        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
1050                ret = FPE_FLTDIV;
1051
1052        /* Inexact result */
1053        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
1054                ret = FPE_FLTRES;
1055
1056        return ret;
1057}
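/*
 * Example: with divide-by-zero exceptions enabled (FPSCR[ZE]) a 1.0/0.0
 * sets FPSCR[ZX]; the enable/status pairing above then maps that to
 * FPE_FLTDIV, which parse_fpe() below delivers as the SIGFPE si_code.
 */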
1058
1059static void parse_fpe(struct pt_regs *regs)
1060{
1061        int code = 0;
1062
1063        flush_fp_to_thread(current);
1064
1065        code = __parse_fpscr(current->thread.fp_state.fpscr);
1066
1067        _exception(SIGFPE, regs, code, regs->nip);
1068}
1069
1070/*
1071 * Illegal instruction emulation support.  Originally written to
1072 * provide the PVR to user applications using the mfspr rd, PVR.
1073 * Return non-zero if we can't emulate, or -EFAULT if the associated
1074 * memory access caused an access fault.  Return zero on success.
1075 *
1076 * There are a couple of ways to do this, either "decode" the instruction
1077 * or directly match lots of bits.  In this case, matching lots of
1078 * bits is faster and easier.
1079 *
1080 */
1081static int emulate_string_inst(struct pt_regs *regs, u32 instword)
1082{
1083        u8 rT = (instword >> 21) & 0x1f;
1084        u8 rA = (instword >> 16) & 0x1f;
1085        u8 NB_RB = (instword >> 11) & 0x1f;
1086        u32 num_bytes;
1087        unsigned long EA;
1088        int pos = 0;
1089
1090        /* Early out if we are an invalid form of lswx */
1091        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
1092                if ((rT == rA) || (rT == NB_RB))
1093                        return -EINVAL;
1094
1095        EA = (rA == 0) ? 0 : regs->gpr[rA];
1096
1097        switch (instword & PPC_INST_STRING_MASK) {
1098                case PPC_INST_LSWX:
1099                case PPC_INST_STSWX:
1100                        EA += NB_RB;
1101                        num_bytes = regs->xer & 0x7f;
1102                        break;
1103                case PPC_INST_LSWI:
1104                case PPC_INST_STSWI:
1105                        num_bytes = (NB_RB == 0) ? 32 : NB_RB;
1106                        break;
1107                default:
1108                        return -EINVAL;
1109        }
1110
1111        while (num_bytes != 0)
1112        {
1113                u8 val;
1114                u32 shift = 8 * (3 - (pos & 0x3));
1115
1116                /* if process is 32-bit, clear upper 32 bits of EA */
1117                if ((regs->msr & MSR_64BIT) == 0)
1118                        EA &= 0xFFFFFFFF;
1119
1120                switch ((instword & PPC_INST_STRING_MASK)) {
1121                        case PPC_INST_LSWX:
1122                        case PPC_INST_LSWI:
1123                                if (get_user(val, (u8 __user *)EA))
1124                                        return -EFAULT;
1125                                /* first time updating this reg,
1126                                 * zero it out */
1127                                if (pos == 0)
1128                                        regs->gpr[rT] = 0;
1129                                regs->gpr[rT] |= val << shift;
1130                                break;
1131                        case PPC_INST_STSWI:
1132                        case PPC_INST_STSWX:
1133                                val = regs->gpr[rT] >> shift;
1134                                if (put_user(val, (u8 __user *)EA))
1135                                        return -EFAULT;
1136                                break;
1137                }
1138                /* move EA to next address */
1139                EA += 1;
1140                num_bytes--;
1141
1142                /* manage our position within the register */
1143                if (++pos == 4) {
1144                        pos = 0;
1145                        if (++rT == 32)
1146                                rT = 0;
1147                }
1148        }
1149
1150        return 0;
1151}
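/*
 * Worked example: "lswi r5,r4,7" copies 7 bytes from the address in r4.
 * The loop above places the first 4 bytes in r5, first byte in the
 * highest byte position of the word, then moves on to r6 (zeroed when
 * its first byte is stored) for the remaining 3 bytes, wrapping from
 * r31 back to r0 if it runs off the end of the register file.
 */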
1152
1153static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
1154{
1155        u32 ra,rs;
1156        unsigned long tmp;
1157
1158        ra = (instword >> 16) & 0x1f;
1159        rs = (instword >> 21) & 0x1f;
1160
1161        tmp = regs->gpr[rs];
1162        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
1163        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
1164        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
1165        regs->gpr[ra] = tmp;
1166
1167        return 0;
1168}
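/*
 * Worked example of the per-byte popcount above: for a byte 0xda
 * (11011010, five bits set) the three steps yield 0x95 (pair sums),
 * 0x32 (nibble sums) and finally 0x05, i.e. the bit count of that
 * byte; all eight bytes of the doubleword are reduced the same way in
 * parallel, which is what popcntb is defined to produce.
 */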
1169
1170static int emulate_isel(struct pt_regs *regs, u32 instword)
1171{
1172        u8 rT = (instword >> 21) & 0x1f;
1173        u8 rA = (instword >> 16) & 0x1f;
1174        u8 rB = (instword >> 11) & 0x1f;
1175        u8 BC = (instword >> 6) & 0x1f;
1176        u8 bit;
1177        unsigned long tmp;
1178
1179        tmp = (rA == 0) ? 0 : regs->gpr[rA];
1180        bit = (regs->ccr >> (31 - BC)) & 0x1;
1181
1182        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
1183
1184        return 0;
1185}
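/*
 * Example: "isel r3,r4,r5,0" tests CR bit 0 (CR0[LT]); when it is set,
 * r3 is loaded from r4 (or with 0 when RA is 0), otherwise from r5,
 * which is exactly the (31 - BC) shift and select done above.
 */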
1186
1187#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1188static inline bool tm_abort_check(struct pt_regs *regs, int cause)
1189{
1190        /* If we're emulating a load/store in an active transaction, we cannot
1191         * emulate it as the kernel operates in transaction suspended context.
1192         * We need to abort the transaction.  This creates a persistent TM
1193         * abort so tell the user what caused it with a new code.
1194         */
1195        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
1196                tm_enable();
1197                tm_abort(cause);
1198                return true;
1199        }
1200        return false;
1201}
1202#else
1203static inline bool tm_abort_check(struct pt_regs *regs, int reason)
1204{
1205        return false;
1206}
1207#endif
1208
1209static int emulate_instruction(struct pt_regs *regs)
1210{
1211        u32 instword;
1212        u32 rd;
1213
1214        if (!user_mode(regs))
1215                return -EINVAL;
1216        CHECK_FULL_REGS(regs);
1217
1218        if (get_user(instword, (u32 __user *)(regs->nip)))
1219                return -EFAULT;
1220
1221        /* Emulate the mfspr rD, PVR. */
1222        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1223                PPC_WARN_EMULATED(mfpvr, regs);
1224                rd = (instword >> 21) & 0x1f;
1225                regs->gpr[rd] = mfspr(SPRN_PVR);
1226                return 0;
1227        }
1228
1229        /* Emulating the dcba insn is just a no-op.  */
1230        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1231                PPC_WARN_EMULATED(dcba, regs);
1232                return 0;
1233        }
1234
1235        /* Emulate the mcrxr insn.  */
1236        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
1237                int shift = (instword >> 21) & 0x1c;
1238                unsigned long msk = 0xf0000000UL >> shift;
1239
1240                PPC_WARN_EMULATED(mcrxr, regs);
1241                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
1242                regs->xer &= ~0xf0000000UL;
1243                return 0;
1244        }
1245
1246        /* Emulate load/store string insn. */
1247        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
1248                if (tm_abort_check(regs,
1249                                   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
1250                        return -EINVAL;
1251                PPC_WARN_EMULATED(string, regs);
1252                return emulate_string_inst(regs, instword);
1253        }
1254
1255        /* Emulate the popcntb (Population Count Bytes) instruction. */
1256        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1257                PPC_WARN_EMULATED(popcntb, regs);
1258                return emulate_popcntb_inst(regs, instword);
1259        }
1260
1261        /* Emulate isel (Integer Select) instruction */
1262        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1263                PPC_WARN_EMULATED(isel, regs);
1264                return emulate_isel(regs, instword);
1265        }
1266
1267        /* Emulate sync instruction variants */
1268        if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
1269                PPC_WARN_EMULATED(sync, regs);
1270                asm volatile("sync");
1271                return 0;
1272        }
1273
1274#ifdef CONFIG_PPC64
1275        /* Emulate the mfspr rD, DSCR. */
1276        if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
1277                PPC_INST_MFSPR_DSCR_USER) ||
1278             ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
1279                PPC_INST_MFSPR_DSCR)) &&
1280                        cpu_has_feature(CPU_FTR_DSCR)) {
1281                PPC_WARN_EMULATED(mfdscr, regs);
1282                rd = (instword >> 21) & 0x1f;
1283                regs->gpr[rd] = mfspr(SPRN_DSCR);
1284                return 0;
1285        }
1286        /* Emulate the mtspr DSCR, rD. */
1287        if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1288                PPC_INST_MTSPR_DSCR_USER) ||
1289             ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1290                PPC_INST_MTSPR_DSCR)) &&
1291                        cpu_has_feature(CPU_FTR_DSCR)) {
1292                PPC_WARN_EMULATED(mtdscr, regs);
1293                rd = (instword >> 21) & 0x1f;
1294                current->thread.dscr = regs->gpr[rd];
1295                current->thread.dscr_inherit = 1;
1296                mtspr(SPRN_DSCR, current->thread.dscr);
1297                return 0;
1298        }
1299#endif
1300
1301        return -EINVAL;
1302}
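/*
 * Illustration of the "match lots of bits" approach used above: mfspr
 * rD,PVR encodes SPR 287 with its two 5-bit halves swapped, giving
 * 0x7c1f42a6 for rD = 0, and the mask simply ignores the rD field so a
 * single compare catches the instruction for any destination register.
 */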
1303
1304int is_valid_bugaddr(unsigned long addr)
1305{
1306        return is_kernel_addr(addr);
1307}
1308
1309#ifdef CONFIG_MATH_EMULATION
1310static int emulate_math(struct pt_regs *regs)
1311{
1312        int ret;
1313        extern int do_mathemu(struct pt_regs *regs);
1314
1315        ret = do_mathemu(regs);
1316        if (ret >= 0)
1317                PPC_WARN_EMULATED(math, regs);
1318
1319        switch (ret) {
1320        case 0:
1321                emulate_single_step(regs);
1322                return 0;
1323        case 1: {
1324                        int code = 0;
1325                        code = __parse_fpscr(current->thread.fp_state.fpscr);
1326                        _exception(SIGFPE, regs, code, regs->nip);
1327                        return 0;
1328                }
1329        case -EFAULT:
1330                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1331                return 0;
1332        }
1333
1334        return -1;
1335}
1336#else
1337static inline int emulate_math(struct pt_regs *regs) { return -1; }
1338#endif
1339
1340void program_check_exception(struct pt_regs *regs)
1341{
1342        enum ctx_state prev_state = exception_enter();
1343        unsigned int reason = get_reason(regs);
1344
 1345        /* We can now get here via an FP Unavailable exception if the core
 1346         * has no FPU; in that case the reason flags will be 0 */
1347
1348        if (reason & REASON_FP) {
1349                /* IEEE FP exception */
1350                parse_fpe(regs);
1351                goto bail;
1352        }
1353        if (reason & REASON_TRAP) {
1354                unsigned long bugaddr;
1355                /* Debugger is first in line to stop recursive faults in
1356                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1357                if (debugger_bpt(regs))
1358                        goto bail;
1359
1360                if (kprobe_handler(regs))
1361                        goto bail;
1362
1363                /* trap exception */
1364                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1365                                == NOTIFY_STOP)
1366                        goto bail;
1367
1368                bugaddr = regs->nip;
1369                /*
1370                 * Fixup bugaddr for BUG_ON() in real mode
1371                 */
1372                if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1373                        bugaddr += PAGE_OFFSET;
1374
1375                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1376                    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1377                        regs->nip += 4;
1378                        goto bail;
1379                }
1380                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1381                goto bail;
1382        }
1383#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1384        if (reason & REASON_TM) {
1385                /* This is a TM "Bad Thing Exception" program check.
1386                 * This occurs when:
1387                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1388                 *    transition in TM states.
1389                 * -  A trechkpt is attempted when transactional.
 1390                 * -  A treclaim is attempted when non-transactional.
1391                 * -  A tend is illegally attempted.
1392                 * -  writing a TM SPR when transactional.
1393                 *
1394                 * If usermode caused this, it's done something illegal and
1395                 * gets a SIGILL slap on the wrist.  We call it an illegal
1396                 * operand to distinguish from the instruction just being bad
1397                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1398                 * illegal /placement/ of a valid instruction.
1399                 */
1400                if (user_mode(regs)) {
1401                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1402                        goto bail;
1403                } else {
1404                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1405                               "at %lx (msr 0x%x)\n", regs->nip, reason);
1406                        die("Unrecoverable exception", regs, SIGABRT);
1407                }
1408        }
1409#endif
1410
1411        /*
 1412         * If we took the program check in the kernel, skip down to sending a
1413         * SIGILL. The subsequent cases all relate to emulating instructions
1414         * which we should only do for userspace. We also do not want to enable
1415         * interrupts for kernel faults because that might lead to further
 1416         * faults, and lose the context of the original exception.
1417         */
1418        if (!user_mode(regs))
1419                goto sigill;
1420
1421        /* We restore the interrupt state now */
1422        if (!arch_irq_disabled_regs(regs))
1423                local_irq_enable();
1424
1425        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1426         * but there seems to be a hardware bug on the 405GP (RevD)
1427         * that means ESR is sometimes set incorrectly - either to
1428         * ESR_DST (!?) or 0.  In the process of chasing this with the
1429         * hardware people - not sure if it can happen on any illegal
1430         * instruction or only on FP instructions, whether there is a
1431         * pattern to occurrences etc. -dgibson 31/Mar/2003
1432         */
1433        if (!emulate_math(regs))
1434                goto bail;
1435
1436        /* Try to emulate it if we should. */
1437        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1438                switch (emulate_instruction(regs)) {
1439                case 0:
1440                        regs->nip += 4;
1441                        emulate_single_step(regs);
1442                        goto bail;
1443                case -EFAULT:
1444                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1445                        goto bail;
1446                }
1447        }
1448
1449sigill:
1450        if (reason & REASON_PRIVILEGED)
1451                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1452        else
1453                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1454
1455bail:
1456        exception_exit(prev_state);
1457}
1458NOKPROBE_SYMBOL(program_check_exception);
1459
1460/*
1461 * This occurs when running in hypervisor mode on POWER6 or later
1462 * and an illegal instruction is encountered.
1463 */
1464void emulation_assist_interrupt(struct pt_regs *regs)
1465{
1466        regs->msr |= REASON_ILLEGAL;
1467        program_check_exception(regs);
1468}
1469NOKPROBE_SYMBOL(emulation_assist_interrupt);
1470
1471void alignment_exception(struct pt_regs *regs)
1472{
1473        enum ctx_state prev_state = exception_enter();
1474        int sig, code, fixed = 0;
1475
1476        /* We restore the interrupt state now */
1477        if (!arch_irq_disabled_regs(regs))
1478                local_irq_enable();
1479
1480        if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1481                goto bail;
1482
1483        /* we don't implement logging of alignment exceptions */
1484        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1485                fixed = fix_alignment(regs);
1486
1487        if (fixed == 1) {
1488                regs->nip += 4; /* skip over emulated instruction */
1489                emulate_single_step(regs);
1490                goto bail;
1491        }
1492
1493        /* Operand address was bad */
1494        if (fixed == -EFAULT) {
1495                sig = SIGSEGV;
1496                code = SEGV_ACCERR;
1497        } else {
1498                sig = SIGBUS;
1499                code = BUS_ADRALN;
1500        }
1501        if (user_mode(regs))
1502                _exception(sig, regs, code, regs->dar);
1503        else
1504                bad_page_fault(regs, regs->dar, sig);
1505
1506bail:
1507        exception_exit(prev_state);
1508}
1509
1510void StackOverflow(struct pt_regs *regs)
1511{
1512        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1513               current, regs->gpr[1]);
1514        debugger(regs);
1515        show_regs(regs);
1516        panic("kernel stack overflow");
1517}
1518
1519void nonrecoverable_exception(struct pt_regs *regs)
1520{
1521        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1522               regs->nip, regs->msr);
1523        debugger(regs);
1524        die("nonrecoverable exception", regs, SIGKILL);
1525}
1526
1527void kernel_fp_unavailable_exception(struct pt_regs *regs)
1528{
1529        enum ctx_state prev_state = exception_enter();
1530
1531        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1532                          "%lx at %lx\n", regs->trap, regs->nip);
1533        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1534
1535        exception_exit(prev_state);
1536}
1537
1538void altivec_unavailable_exception(struct pt_regs *regs)
1539{
1540        enum ctx_state prev_state = exception_enter();
1541
1542        if (user_mode(regs)) {
1543                /* A user program has executed an altivec instruction,
1544                   but this kernel doesn't support altivec. */
1545                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1546                goto bail;
1547        }
1548
1549        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1550                        "%lx at %lx\n", regs->trap, regs->nip);
1551        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1552
1553bail:
1554        exception_exit(prev_state);
1555}
1556
1557void vsx_unavailable_exception(struct pt_regs *regs)
1558{
1559        if (user_mode(regs)) {
1560                /* A user program has executed a VSX instruction,
1561                   but this kernel doesn't support VSX. */
1562                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1563                return;
1564        }
1565
1566        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1567                        "%lx at %lx\n", regs->trap, regs->nip);
1568        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1569}
1570
1571#ifdef CONFIG_PPC64
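/*
 * TM is handed to user tasks lazily: on the first TM facility
 * unavailable interrupt from userspace we bump the thread's load_tm
 * count, set MSR_TM in the saved MSR, enable TM and restore the TM
 * SPRs, then return so the failing instruction can be retried.
 */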
1572static void tm_unavailable(struct pt_regs *regs)
1573{
1574#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1575        if (user_mode(regs)) {
1576                current->thread.load_tm++;
1577                regs->msr |= MSR_TM;
1578                tm_enable();
1579                tm_restore_sprs(&current->thread);
1580                return;
1581        }
1582#endif
1583        pr_emerg("Unrecoverable TM Unavailable Exception "
1584                        "%lx at %lx\n", regs->trap, regs->nip);
1585        die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1586}
1587
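/*
 * The facility that triggered the interrupt is encoded in the top byte
 * of the FSCR, or of the HFSCR for the hypervisor variant (trap 0xf80);
 * that value indexes facility_strings[] below.
 */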
1588void facility_unavailable_exception(struct pt_regs *regs)
1589{
1590        static char *facility_strings[] = {
1591                [FSCR_FP_LG] = "FPU",
1592                [FSCR_VECVSX_LG] = "VMX/VSX",
1593                [FSCR_DSCR_LG] = "DSCR",
1594                [FSCR_PM_LG] = "PMU SPRs",
1595                [FSCR_BHRB_LG] = "BHRB",
1596                [FSCR_TM_LG] = "TM",
1597                [FSCR_EBB_LG] = "EBB",
1598                [FSCR_TAR_LG] = "TAR",
1599                [FSCR_MSGP_LG] = "MSGP",
1600                [FSCR_SCV_LG] = "SCV",
1601        };
1602        char *facility = "unknown";
1603        u64 value;
1604        u32 instword, rd;
1605        u8 status;
1606        bool hv;
1607
1608        hv = (TRAP(regs) == 0xf80);
1609        if (hv)
1610                value = mfspr(SPRN_HFSCR);
1611        else
1612                value = mfspr(SPRN_FSCR);
1613
1614        status = value >> 56;
1615        if ((hv || status >= 2) &&
1616            (status < ARRAY_SIZE(facility_strings)) &&
1617            facility_strings[status])
1618                facility = facility_strings[status];
1619
1620        /* We should not have taken this interrupt in kernel */
1621        if (!user_mode(regs)) {
1622                pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
1623                         facility, status, regs->nip);
1624                die("Unexpected facility unavailable exception", regs, SIGABRT);
1625        }
1626
1627        /* We restore the interrupt state now */
1628        if (!arch_irq_disabled_regs(regs))
1629                local_irq_enable();
1630
1631        if (status == FSCR_DSCR_LG) {
1632                /*
1633                 * The user is accessing the DSCR register using the
1634                 * problem-state-only SPR number (0x03), through either an
1635                 * mfspr or an mtspr instruction. If it is a write attempt
1636                 * through an mtspr, we set the inherit bit and the FSCR
1637                 * DSCR bit, which also lets the user read or write the
1638                 * register directly from now on. But if it is a read
1639                 * attempt through an mfspr instruction, we just emulate
1640                 * the instruction instead. This code path keeps emulating
1641                 * all mfspr instructions until the user has attempted at
1642                 * least one mtspr instruction. This preserves the same
1643                 * behaviour as when the user accesses the DSCR through the
1644                 * privileged SPR number (0x11), which is emulated through
1645                 * the illegal instruction exception. We always leave
1646                 * HFSCR DSCR set.
1647                 */
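                /*
                 * For illustration only: the two cases handled below
                 * correspond to userspace code such as
                 *
                 *      asm volatile("mtspr 3, %0" : : "r"(val));   (write DSCR)
                 *      asm volatile("mfspr %0, 3" : "=r"(val));    (read DSCR)
                 *
                 * where 3 is the problem-state DSCR SPR number (0x03)
                 * mentioned above.
                 */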
1648                if (get_user(instword, (u32 __user *)(regs->nip))) {
1649                        pr_err("Failed to fetch the user instruction\n");
1650                        return;
1651                }
1652
1653                /* Write into DSCR (mtspr 0x03, RS) */
1654                if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1655                                == PPC_INST_MTSPR_DSCR_USER) {
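                        /* RS names the GPR whose value is written to the DSCR */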
1656                        rd = (instword >> 21) & 0x1f;
1657                        current->thread.dscr = regs->gpr[rd];
1658                        current->thread.dscr_inherit = 1;
1659                        current->thread.fscr |= FSCR_DSCR;
1660                        mtspr(SPRN_FSCR, current->thread.fscr);
1661                }
1662
1663                /* Read from DSCR (mfspr RT, 0x03) */
1664                if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1665                                == PPC_INST_MFSPR_DSCR_USER) {
1666                        if (emulate_instruction(regs)) {
1667                                pr_err("DSCR based mfspr emulation failed\n");
1668                                return;
1669                        }
1670                        regs->nip += 4;
1671                        emulate_single_step(regs);
1672                }
1673                return;
1674        }
1675
1676        if (status == FSCR_TM_LG) {
1677                /*
1678                 * If we're here then the hardware is TM aware because it
1679                 * generated an exception with FSCR_TM set.
1680                 *
1681                 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1682                 * told us not to do TM, or the kernel is not built with TM
1683                 * support.
1684                 *
1685                 * If both of those things are true, then userspace can spam the
1686                 * console by triggering the printk() below just by continually
1687                 * doing tbegin (or any TM instruction). So in that case just
1688                 * send the process a SIGILL immediately.
1689                 */
1690                if (!cpu_has_feature(CPU_FTR_TM))
1691                        goto out;
1692
1693                tm_unavailable(regs);
1694                return;
1695        }
1696
1697        pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1698                hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1699
1700out:
1701        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1702}
1703#endif
1704
1705#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1706
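/*
 * The *_unavailable_tm() handlers below run when a facility unavailable
 * interrupt is taken while the thread is transactional. A sketch of the
 * FP case, for illustration:
 *
 *        tbegin.                 (transaction starts with FP disabled)
 *        ...
 *        fadd    f1, f2, f3      (first FP use traps to fp_unavailable_tm())
 *
 * The handler reclaims the transaction, marks FP as in use and
 * recheckpoints, so the retry runs with FP enabled.
 */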
1707void fp_unavailable_tm(struct pt_regs *regs)
1708{
1709        /* Note:  This does not handle any kind of FP laziness. */
1710
1711        TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1712                 regs->nip, regs->msr);
1713
1714        /* We can only have got here if the task started using FP after
1715         * beginning the transaction.  So, the transactional regs are just a
1716         * copy of the checkpointed ones.  But, we still need to recheckpoint
1717         * as we're enabling FP for the process; it will return, abort the
1718         * transaction, and probably retry but now with FP enabled.  So the
1719         * checkpointed FP registers need to be loaded.
1720         */
1721        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1722        /* Reclaim didn't save out any FPRs to transact_fprs. */
1723
1724        /* Enable FP for the task: */
1725        current->thread.load_fp = 1;
1726
1727        /* This loads and recheckpoints the FP registers from
1728         * thread.fpr[].  They will remain in registers after the
1729         * checkpoint so we don't need to reload them after.
1730         * If VMX is in use, the VRs now hold checkpointed values,
1731         * so we don't want to load the VRs from the thread_struct.
1732         */
1733        tm_recheckpoint(&current->thread);
1734}
1735
1736void altivec_unavailable_tm(struct pt_regs *regs)
1737{
1738        /* See the comments in fp_unavailable_tm().  This function operates
1739         * the same way.
1740         */
1741
1742        TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, "
1743                 "MSR=%lx\n",
1744                 regs->nip, regs->msr);
1745        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1746        current->thread.load_vec = 1;
1747        tm_recheckpoint(&current->thread);
1748        current->thread.used_vr = 1;
1749}
1750
1751void vsx_unavailable_tm(struct pt_regs *regs)
1752{
1753        /* See the comments in fp_unavailable_tm().  This works similarly,
1754         * though we're loading both FP and VEC registers in here.
1755         *
1756         * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
1757         * regs.  Either way, set MSR_VSX.
1758         */
1759
1760        TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
1761                 "MSR=%lx\n",
1762                 regs->nip, regs->msr);
1763
1764        current->thread.used_vsr = 1;
1765
1766        /* This reclaims FP and/or VR regs if they're already enabled */
1767        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1768
1769        current->thread.load_vec = 1;
1770        current->thread.load_fp = 1;
1771
1772        tm_recheckpoint(&current->thread);
1773}
1774#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1775
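/*
 * PMU interrupts are counted per cpu in irq_stat and passed straight to
 * whichever perf_irq handler is currently registered.
 */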
1776void performance_monitor_exception(struct pt_regs *regs)
1777{
1778        __this_cpu_inc(irq_stat.pmu_irqs);
1779
1780        perf_irq(regs);
1781}
1782
1783#ifdef CONFIG_PPC_ADV_DEBUG_REGS
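/*
 * BookE debug events: for each DAC/IAC match flagged in the DBSR we
 * disarm the corresponding DBCR enable, report the matching address to
 * the debugger via do_send_trap(), and only leave MSR_DE set if other
 * debug events are still armed.
 */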
1784static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1785{
1786        int changed = 0;
1787        /*
1788         * Determine the cause of the debug event, clear the
1789         * event flags and send a trap to the handler.
1790         */
1791        if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1792                dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1793#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1794                current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1795#endif
1796                do_send_trap(regs, mfspr(SPRN_DAC1), debug_status,
1797                             5);
1798                changed |= 0x01;
1799        }  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1800                dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1801                do_send_trap(regs, mfspr(SPRN_DAC2), debug_status,
1802                             6);
1803                changed |= 0x01;
1804        }  else if (debug_status & DBSR_IAC1) {
1805                current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1806                dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1807                do_send_trap(regs, mfspr(SPRN_IAC1), debug_status,
1808                             1);
1809                changed |= 0x01;
1810        }  else if (debug_status & DBSR_IAC2) {
1811                current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1812                do_send_trap(regs, mfspr(SPRN_IAC2), debug_status,
1813                             2);
1814                changed |= 0x01;
1815        }  else if (debug_status & DBSR_IAC3) {
1816                current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1817                dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1818                do_send_trap(regs, mfspr(SPRN_IAC3), debug_status,
1819                             3);
1820                changed |= 0x01;
1821        }  else if (debug_status & DBSR_IAC4) {
1822                current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1823                do_send_trap(regs, mfspr(SPRN_IAC4), debug_status,
1824                             4);
1825                changed |= 0x01;
1826        }
1827        /*
1828         * At the point this routine was called, the MSR(DE) was turned off.
1829         * Check all other debug flags and see if that bit needs to be turned
1830         * back on or not.
1831         */
1832        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1833                               current->thread.debug.dbcr1))
1834                regs->msr |= MSR_DE;
1835        else
1836                /* Make sure the IDM flag is off */
1837                current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1838
1839        if (changed & 0x01)
1840                mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1841}
1842
1843void DebugException(struct pt_regs *regs, unsigned long debug_status)
1844{
1845        current->thread.debug.dbsr = debug_status;
1846
1847        /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1848         * on server, it stops on the target of the branch. In order to simulate
1849         * the server behaviour, we thus restart right away with a single step
1850         * instead of stopping here when hitting a BT.
1851         */
1852        if (debug_status & DBSR_BT) {
1853                regs->msr &= ~MSR_DE;
1854
1855                /* Disable BT */
1856                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1857                /* Clear the BT event */
1858                mtspr(SPRN_DBSR, DBSR_BT);
1859
1860                /* Do the single step trick only when coming from userspace */
1861                if (user_mode(regs)) {
1862                        current->thread.debug.dbcr0 &= ~DBCR0_BT;
1863                        current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1864                        regs->msr |= MSR_DE;
1865                        return;
1866                }
1867
1868                if (kprobe_post_handler(regs))
1869                        return;
1870
1871                if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1872                               5, SIGTRAP) == NOTIFY_STOP) {
1873                        return;
1874                }
1875                if (debugger_sstep(regs))
1876                        return;
1877        } else if (debug_status & DBSR_IC) {    /* Instruction complete */
1878                regs->msr &= ~MSR_DE;
1879
1880                /* Disable instruction completion */
1881                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1882                /* Clear the instruction completion event */
1883                mtspr(SPRN_DBSR, DBSR_IC);
1884
1885                if (kprobe_post_handler(regs))
1886                        return;
1887
1888                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1889                               5, SIGTRAP) == NOTIFY_STOP) {
1890                        return;
1891                }
1892
1893                if (debugger_sstep(regs))
1894                        return;
1895
1896                if (user_mode(regs)) {
1897                        current->thread.debug.dbcr0 &= ~DBCR0_IC;
1898                        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1899                                               current->thread.debug.dbcr1))
1900                                regs->msr |= MSR_DE;
1901                        else
1902                                /* Make sure the IDM bit is off */
1903                                current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1904                }
1905
1906                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1907        } else
1908                handle_debug(regs, debug_status);
1909}
1910NOKPROBE_SYMBOL(DebugException);
1911#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1912
1913#if !defined(CONFIG_TAU_INT)
1914void TAUException(struct pt_regs *regs)
1915{
1916        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1917               regs->nip, regs->msr, regs->trap, print_tainted());
1918}
1919#endif /* CONFIG_TAU_INT */
1920
1921#ifdef CONFIG_ALTIVEC
1922void altivec_assist_exception(struct pt_regs *regs)
1923{
1924        int err;
1925
1926        if (!user_mode(regs)) {
1927                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1928                       " at %lx\n", regs->nip);
1929                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1930        }
1931
1932        flush_altivec_to_thread(current);
1933
1934        PPC_WARN_EMULATED(altivec, regs);
1935        err = emulate_altivec(regs);
1936        if (err == 0) {
1937                regs->nip += 4;         /* skip emulated instruction */
1938                emulate_single_step(regs);
1939                return;
1940        }
1941
1942        if (err == -EFAULT) {
1943                /* got an error reading the instruction */
1944                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1945        } else {
1946                /* didn't recognize the instruction */
1947                /* XXX quick hack for now: set the non-Java bit in the VSCR */
1948                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1949                                   "in %s at %lx\n", current->comm, regs->nip);
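                /*
                 * 0x10000 is VSCR[NJ] (non-Java mode): denormalized operands
                 * and results are then flushed to zero, so the unhandled
                 * instruction should not keep triggering the assist.
                 */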
1950                current->thread.vr_state.vscr.u[3] |= 0x10000;
1951        }
1952}
1953#endif /* CONFIG_ALTIVEC */
1954
1955#ifdef CONFIG_FSL_BOOKE
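/*
 * On e500-style cores, user-mode cache locking attempts (e.g. dcbtls or
 * icbtls without the user cache-lock enable) are typically reported here
 * with ESR[DLK] or ESR[ILK] set.
 */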
1956void CacheLockingException(struct pt_regs *regs, unsigned long address,
1957                           unsigned long error_code)
1958{
1959        /* We treat cache locking instructions from the user
1960         * as privileged ops; in the future we could try to do
1961         * something smarter.
1962         */
1963        if (error_code & (ESR_DLK|ESR_ILK))
1964                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1965        return;
1966}
1967#endif /* CONFIG_FSL_BOOKE */
1968
1969#ifdef CONFIG_SPE
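/*
 * SPE floating point exceptions: derive the closest FPE_* si_code from
 * the SPEFSCR status bits, honouring the exception mask the task chose
 * with prctl(PR_SET_FPEXC), then give the SPE math emulator a chance
 * before signalling.
 */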
1970void SPEFloatingPointException(struct pt_regs *regs)
1971{
1972        extern int do_spe_mathemu(struct pt_regs *regs);
1973        unsigned long spefscr;
1974        int fpexc_mode;
1975        int code = FPE_FLTUNK;
1976        int err;
1977
1978        flush_spe_to_thread(current);
1979
1980        spefscr = current->thread.spefscr;
1981        fpexc_mode = current->thread.fpexc_mode;
1982
1983        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1984                code = FPE_FLTOVF;
1985        }
1986        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1987                code = FPE_FLTUND;
1988        }
1989        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1990                code = FPE_FLTDIV;
1991        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1992                code = FPE_FLTINV;
1993        }
1994        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1995                code = FPE_FLTRES;
1996
1997        err = do_spe_mathemu(regs);
1998        if (err == 0) {
1999                regs->nip += 4;         /* skip emulated instruction */
2000                emulate_single_step(regs);
2001                return;
2002        }
2003
2004        if (err == -EFAULT) {
2005                /* got an error reading the instruction */
2006                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2007        } else if (err == -EINVAL) {
2008                /* didn't recognize the instruction */
2009                printk(KERN_ERR "unrecognized SPE instruction "
2010                       "in %s at %lx\n", current->comm, regs->nip);
2011        } else {
2012                _exception(SIGFPE, regs, code, regs->nip);
2013        }
2014
2015        return;
2016}
2017
2018void SPEFloatingPointRoundException(struct pt_regs *regs)
2019{
2020        extern int speround_handler(struct pt_regs *regs);
2021        int err;
2022
2023        preempt_disable();
2024        if (regs->msr & MSR_SPE)
2025                giveup_spe(current);
2026        preempt_enable();
2027
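        /*
         * NIP has already advanced past the instruction that produced the
         * inexact result; step back so speround_handler() can look at it.
         */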
2028        regs->nip -= 4;
2029        err = speround_handler(regs);
2030        if (err == 0) {
2031                regs->nip += 4;         /* skip emulated instruction */
2032                emulate_single_step(regs);
2033                return;
2034        }
2035
2036        if (err == -EFAULT) {
2037                /* got an error reading the instruction */
2038                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
2039        } else if (err == -EINVAL) {
2040                /* didn't recognize the instruction */
2041                printk(KERN_ERR "unrecognized SPE instruction "
2042                       "in %s at %lx\n", current->comm, regs->nip);
2043        } else {
2044                _exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
2045                return;
2046        }
2047}
2048#endif
2049
2050/*
2051 * We enter here if we get an unrecoverable exception, that is, one
2052 * that happened at a point where the RI (recoverable interrupt) bit
2053 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
2054 * we therefore lost state by taking this exception.
2055 */
2056void unrecoverable_exception(struct pt_regs *regs)
2057{
2058        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
2059               regs->trap, regs->nip);
2060        die("Unrecoverable exception", regs, SIGABRT);
2061}
2062NOKPROBE_SYMBOL(unrecoverable_exception);
2063
2064#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
2065/*
2066 * Default handler for a Watchdog exception,
2067 * spins until a reboot occurs
2068 */
2069void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
2070{
2071        /* Generic WatchdogHandler, implement your own */
2072        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
2073        return;
2074}
2075
2076void WatchdogException(struct pt_regs *regs)
2077{
2078        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
2079        WatchdogHandler(regs);
2080}
2081#endif
2082
2083/*
2084 * We enter here if we discover during exception entry that we are
2085 * running in supervisor mode with a userspace value in the stack pointer.
2086 */
2087void kernel_bad_stack(struct pt_regs *regs)
2088{
2089        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
2090               regs->gpr[1], regs->nip);
2091        die("Bad kernel stack pointer", regs, SIGABRT);
2092}
2093NOKPROBE_SYMBOL(kernel_bad_stack);
2094
2095void __init trap_init(void)
2096{
2097}
2098
2099
2100#ifdef CONFIG_PPC_EMULATED_STATS
2101
2102#define WARN_EMULATED_SETUP(type)       .type = { .name = #type }
2103
2104struct ppc_emulated ppc_emulated = {
2105#ifdef CONFIG_ALTIVEC
2106        WARN_EMULATED_SETUP(altivec),
2107#endif
2108        WARN_EMULATED_SETUP(dcba),
2109        WARN_EMULATED_SETUP(dcbz),
2110        WARN_EMULATED_SETUP(fp_pair),
2111        WARN_EMULATED_SETUP(isel),
2112        WARN_EMULATED_SETUP(mcrxr),
2113        WARN_EMULATED_SETUP(mfpvr),
2114        WARN_EMULATED_SETUP(multiple),
2115        WARN_EMULATED_SETUP(popcntb),
2116        WARN_EMULATED_SETUP(spe),
2117        WARN_EMULATED_SETUP(string),
2118        WARN_EMULATED_SETUP(sync),
2119        WARN_EMULATED_SETUP(unaligned),
2120#ifdef CONFIG_MATH_EMULATION
2121        WARN_EMULATED_SETUP(math),
2122#endif
2123#ifdef CONFIG_VSX
2124        WARN_EMULATED_SETUP(vsx),
2125#endif
2126#ifdef CONFIG_PPC64
2127        WARN_EMULATED_SETUP(mfdscr),
2128        WARN_EMULATED_SETUP(mtdscr),
2129        WARN_EMULATED_SETUP(lq_stq),
2130        WARN_EMULATED_SETUP(lxvw4x),
2131        WARN_EMULATED_SETUP(lxvh8x),
2132        WARN_EMULATED_SETUP(lxvd2x),
2133        WARN_EMULATED_SETUP(lxvb16x),
2134#endif
2135};
2136
2137u32 ppc_warn_emulated;
2138
2139void ppc_warn_emulated_print(const char *type)
2140{
2141        pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
2142                            type);
2143}
2144
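/*
 * Expose the counters above through debugfs, typically as
 * /sys/kernel/debug/powerpc/emulated_instructions/<type>, along with a
 * do_warn knob that switches the ratelimited warning on or off.
 */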
2145static int __init ppc_warn_emulated_init(void)
2146{
2147        struct dentry *dir, *d;
2148        unsigned int i;
2149        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
2150
2151        if (!powerpc_debugfs_root)
2152                return -ENODEV;
2153
2154        dir = debugfs_create_dir("emulated_instructions",
2155                                 powerpc_debugfs_root);
2156        if (!dir)
2157                return -ENOMEM;
2158
2159        d = debugfs_create_u32("do_warn", 0644, dir,
2160                               &ppc_warn_emulated);
2161        if (!d)
2162                goto fail;
2163
2164        for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
2165                d = debugfs_create_u32(entries[i].name, 0644, dir,
2166                                       (u32 *)&entries[i].val.counter);
2167                if (!d)
2168                        goto fail;
2169        }
2170
2171        return 0;
2172
2173fail:
2174        debugfs_remove_recursive(dir);
2175        return -ENOMEM;
2176}
2177
2178device_initcall(ppc_warn_emulated_init);
2179
2180#endif /* CONFIG_PPC_EMULATED_STATS */
2181