linux/arch/powerpc/kernel/traps.c
   1/*
   2 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
   3 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
   4 *
   5 *  This program is free software; you can redistribute it and/or
   6 *  modify it under the terms of the GNU General Public License
   7 *  as published by the Free Software Foundation; either version
   8 *  2 of the License, or (at your option) any later version.
   9 *
  10 *  Modified by Cort Dougan (cort@cs.nmt.edu)
  11 *  and Paul Mackerras (paulus@samba.org)
  12 */
  13
  14/*
  15 * This file handles the architecture-dependent parts of hardware exceptions
  16 */
  17
  18#include <linux/errno.h>
  19#include <linux/sched.h>
  20#include <linux/kernel.h>
  21#include <linux/mm.h>
  22#include <linux/stddef.h>
  23#include <linux/unistd.h>
  24#include <linux/ptrace.h>
  25#include <linux/user.h>
  26#include <linux/interrupt.h>
  27#include <linux/init.h>
  28#include <linux/module.h>
  29#include <linux/prctl.h>
  30#include <linux/delay.h>
  31#include <linux/kprobes.h>
  32#include <linux/kexec.h>
  33#include <linux/backlight.h>
  34#include <linux/bug.h>
  35#include <linux/kdebug.h>
  36#include <linux/debugfs.h>
  37#include <linux/ratelimit.h>
  38#include <linux/context_tracking.h>
  39#include <linux/kmsg_dump.h>
  40
  41#include <asm/emulated_ops.h>
  42#include <asm/pgtable.h>
  43#include <asm/uaccess.h>
  44#include <asm/io.h>
  45#include <asm/machdep.h>
  46#include <asm/rtas.h>
  47#include <asm/pmc.h>
  48#include <asm/reg.h>
  49#ifdef CONFIG_PMAC_BACKLIGHT
  50#include <asm/backlight.h>
  51#endif
  52#ifdef CONFIG_PPC64
  53#include <asm/firmware.h>
  54#include <asm/processor.h>
  55#include <asm/tm.h>
  56#endif
  57#include <asm/kexec.h>
  58#include <asm/ppc-opcode.h>
  59#include <asm/rio.h>
  60#include <asm/fadump.h>
  61#include <asm/switch_to.h>
  62#include <asm/tm.h>
  63#include <asm/debug.h>
  64#include <asm/asm-prototypes.h>
  65#include <asm/hmi.h>
  66
  67#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
  68int (*__debugger)(struct pt_regs *regs) __read_mostly;
  69int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
  70int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
  71int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
  72int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
  73int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
  74int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
  75
  76EXPORT_SYMBOL(__debugger);
  77EXPORT_SYMBOL(__debugger_ipi);
  78EXPORT_SYMBOL(__debugger_bpt);
  79EXPORT_SYMBOL(__debugger_sstep);
  80EXPORT_SYMBOL(__debugger_iabr_match);
  81EXPORT_SYMBOL(__debugger_break_match);
  82EXPORT_SYMBOL(__debugger_fault_handler);
  83#endif
  84
  85/* Transactional Memory trap debug */
  86#ifdef TM_DEBUG_SW
  87#define TM_DEBUG(x...) printk(KERN_INFO x)
  88#else
   89#define TM_DEBUG(x...) do { } while (0)
  90#endif
  91
  92/*
  93 * Trap & Exception support
  94 */
  95
  96#ifdef CONFIG_PMAC_BACKLIGHT
  97static void pmac_backlight_unblank(void)
  98{
  99        mutex_lock(&pmac_backlight_mutex);
 100        if (pmac_backlight) {
 101                struct backlight_properties *props;
 102
 103                props = &pmac_backlight->props;
 104                props->brightness = props->max_brightness;
 105                props->power = FB_BLANK_UNBLANK;
 106                backlight_update_status(pmac_backlight);
 107        }
 108        mutex_unlock(&pmac_backlight_mutex);
 109}
 110#else
 111static inline void pmac_backlight_unblank(void) { }
 112#endif
 113
 114static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 115static int die_owner = -1;
 116static unsigned int die_nest_count;
 117static int die_counter;
 118
  119static unsigned long __kprobes oops_begin(struct pt_regs *regs)
 120{
 121        int cpu;
 122        unsigned long flags;
 123
 124        if (debugger(regs))
 125                return 1;
 126
 127        oops_enter();
 128
 129        /* racy, but better than risking deadlock. */
 130        raw_local_irq_save(flags);
 131        cpu = smp_processor_id();
 132        if (!arch_spin_trylock(&die_lock)) {
 133                if (cpu == die_owner)
 134                        /* nested oops. should stop eventually */;
 135                else
 136                        arch_spin_lock(&die_lock);
 137        }
 138        die_nest_count++;
 139        die_owner = cpu;
 140        console_verbose();
 141        bust_spinlocks(1);
 142        if (machine_is(powermac))
 143                pmac_backlight_unblank();
 144        return flags;
 145}
 146
 147static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
 148                               int signr)
 149{
 150        bust_spinlocks(0);
 151        die_owner = -1;
 152        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 153        die_nest_count--;
 154        oops_exit();
 155        printk("\n");
 156        if (!die_nest_count)
 157                /* Nest count reaches zero, release the lock. */
 158                arch_spin_unlock(&die_lock);
 159        raw_local_irq_restore(flags);
 160
 161        crash_fadump(regs, "die oops");
 162
 163        if (kexec_should_crash(current))
 164                crash_kexec(regs);
 165
 166        if (!signr)
 167                return;
 168
 169        /*
 170         * While our oops output is serialised by a spinlock, output
 171         * from panic() called below can race and corrupt it. If we
 172         * know we are going to panic, delay for 1 second so we have a
 173         * chance to get clean backtraces from all CPUs that are oopsing.
 174         */
 175        if (in_interrupt() || panic_on_oops || !current->pid ||
 176            is_global_init(current)) {
 177                mdelay(MSEC_PER_SEC);
 178        }
 179
 180        if (in_interrupt())
 181                panic("Fatal exception in interrupt");
 182        if (panic_on_oops)
 183                panic("Fatal exception");
 184        do_exit(signr);
 185}
 186
 187static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
 188{
 189        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
 190#ifdef CONFIG_PREEMPT
 191        printk("PREEMPT ");
 192#endif
 193#ifdef CONFIG_SMP
 194        printk("SMP NR_CPUS=%d ", NR_CPUS);
 195#endif
 196        if (debug_pagealloc_enabled())
 197                printk("DEBUG_PAGEALLOC ");
 198#ifdef CONFIG_NUMA
 199        printk("NUMA ");
 200#endif
 201        printk("%s\n", ppc_md.name ? ppc_md.name : "");
 202
 203        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
 204                return 1;
 205
 206        print_modules();
 207        show_regs(regs);
 208
 209        return 0;
 210}
 211
 212void die(const char *str, struct pt_regs *regs, long err)
 213{
 214        unsigned long flags = oops_begin(regs);
 215
 216        if (__die(str, regs, err))
 217                err = 0;
 218        oops_end(flags, regs, err);
 219}
 220
 221void user_single_step_siginfo(struct task_struct *tsk,
 222                                struct pt_regs *regs, siginfo_t *info)
 223{
 224        memset(info, 0, sizeof(*info));
 225        info->si_signo = SIGTRAP;
 226        info->si_code = TRAP_TRACE;
 227        info->si_addr = (void __user *)regs->nip;
 228}
 229
 230void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 231{
 232        siginfo_t info;
 233        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 234                        "at %08lx nip %08lx lr %08lx code %x\n";
 235        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 236                        "at %016lx nip %016lx lr %016lx code %x\n";
 237
 238        if (!user_mode(regs)) {
 239                die("Exception in kernel mode", regs, signr);
 240                return;
 241        }
 242
 243        if (show_unhandled_signals && unhandled_signal(current, signr)) {
 244                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
 245                                   current->comm, current->pid, signr,
 246                                   addr, regs->nip, regs->link, code);
 247        }
 248
 249        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 250                local_irq_enable();
 251
 252        current->thread.trap_nr = code;
 253        memset(&info, 0, sizeof(info));
 254        info.si_signo = signr;
 255        info.si_code = code;
 256        info.si_addr = (void __user *) addr;
 257        force_sig_info(signr, &info, current);
 258}
 259
 260#ifdef CONFIG_PPC64
 261void system_reset_exception(struct pt_regs *regs)
 262{
 263        /* See if any machine dependent calls */
 264        if (ppc_md.system_reset_exception) {
 265                if (ppc_md.system_reset_exception(regs))
 266                        return;
 267        }
 268
 269        if (debugger(regs))
 270                goto out;
 271
 272        kmsg_dump(KMSG_DUMP_OOPS);
 273        /*
 274         * A system reset is a request to dump, so we always send
 275         * it through the crashdump code (if fadump or kdump are
 276         * registered).
 277         */
 278        crash_fadump(regs, "System Reset");
 279
 280        crash_kexec(regs);
 281
 282        /*
 283         * We aren't the primary crash CPU. We need to send it
 284         * to a holding pattern to avoid it ending up in the panic
 285         * code.
 286         */
 287        crash_kexec_secondary(regs);
 288
 289        /*
 290         * No debugger or crash dump registered, print logs then
 291         * panic.
 292         */
 293        die("System Reset", regs, SIGABRT);
 294
 295        mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
 296        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 297        nmi_panic(regs, "System Reset");
 298
 299out:
 300        /* Must die if the interrupt is not recoverable */
 301        if (!(regs->msr & MSR_RI))
 302                nmi_panic(regs, "Unrecoverable System Reset");
 303
 304        /* What should we do here? We could issue a shutdown or hard reset. */
 305}
 306
 307/*
 308 * This function is called in real mode. Strictly no printk's please.
 309 *
  310 * regs->nip and regs->msr contain SRR0 and SRR1.
 311 */
 312long machine_check_early(struct pt_regs *regs)
 313{
 314        long handled = 0;
 315
 316        __get_cpu_var(irq_stat).mce_exceptions++;
 317
 318        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
 319                handled = cur_cpu_spec->machine_check_early(regs);
 320        return handled;
 321}
 322
 323long hmi_exception_realmode(struct pt_regs *regs)
 324{
 325        __get_cpu_var(irq_stat).hmi_exceptions++;
 326
 327        wait_for_subcore_guest_exit();
 328
 329        if (ppc_md.hmi_exception_early)
 330                ppc_md.hmi_exception_early(regs);
 331
 332        wait_for_tb_resync();
 333
 334        return 0;
 335}
 336
 337#endif
 338
 339/*
 340 * I/O accesses can cause machine checks on powermacs.
 341 * Check if the NIP corresponds to the address of a sync
 342 * instruction for which there is an entry in the exception
 343 * table.
 344 * Note that the 601 only takes a machine check on TEA
 345 * (transfer error ack) signal assertion, and does not
 346 * set any of the top 16 bits of SRR1.
 347 *  -- paulus.
 348 */
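/*
 * For reference, the PPC32 I/O accessors bracket the access roughly
 * like this (a sketch of the pattern walked backwards below, not the
 * exact asm of any particular io.h variant):
 *
 *      lbz  rD,0(rA)   # the load that can take the machine check
 *      twi  0,rD,0     # trap-word-immediate forces the load to complete
 *      isync           # context-synchronize before the value is used
 *      nop
 *
 * Stores are instead followed by a sync.  The exception table entry
 * placed on this sequence is what lets us land in the fixup below.
 */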
 349static inline int check_io_access(struct pt_regs *regs)
 350{
 351#ifdef CONFIG_PPC32
 352        unsigned long msr = regs->msr;
 353        const struct exception_table_entry *entry;
 354        unsigned int *nip = (unsigned int *)regs->nip;
 355
 356        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
 357            && (entry = search_exception_tables(regs->nip)) != NULL) {
 358                /*
 359                 * Check that it's a sync instruction, or somewhere
 360                 * in the twi; isync; nop sequence that inb/inw/inl uses.
 361                 * As the address is in the exception table
 362                 * we should be able to read the instr there.
 363                 * For the debug message, we look at the preceding
 364                 * load or store.
 365                 */
 366                if (*nip == 0x60000000)         /* nop */
 367                        nip -= 2;
 368                else if (*nip == 0x4c00012c)    /* isync */
 369                        --nip;
 370                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
 371                        /* sync or twi */
 372                        unsigned int rb;
 373
 374                        --nip;
 375                        rb = (*nip >> 11) & 0x1f;
 376                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
  377                (*nip & 0x100) ? "OUT to" : "IN from",
 378                               regs->gpr[rb] - _IO_BASE, nip);
 379                        regs->msr |= MSR_RI;
 380                        regs->nip = entry->fixup;
 381                        return 1;
 382                }
 383        }
 384#endif /* CONFIG_PPC32 */
 385        return 0;
 386}
 387
 388#ifdef CONFIG_PPC_ADV_DEBUG_REGS
 389/* On 4xx, the reason for the machine check or program exception
 390   is in the ESR. */
 391#define get_reason(regs)        ((regs)->dsisr)
 392#ifndef CONFIG_FSL_BOOKE
 393#define get_mc_reason(regs)     ((regs)->dsisr)
 394#else
 395#define get_mc_reason(regs)     (mfspr(SPRN_MCSR))
 396#endif
 397#define REASON_FP               ESR_FP
 398#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
 399#define REASON_PRIVILEGED       ESR_PPR
 400#define REASON_TRAP             ESR_PTR
 401
 402/* single-step stuff */
 403#define single_stepping(regs)   (current->thread.debug.dbcr0 & DBCR0_IC)
 404#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
 405
 406#else
 407/* On non-4xx, the reason for the machine check or program
 408   exception is in the MSR. */
 409#define get_reason(regs)        ((regs)->msr)
 410#define get_mc_reason(regs)     ((regs)->msr)
 411#define REASON_TM               0x200000
 412#define REASON_FP               0x100000
 413#define REASON_ILLEGAL          0x80000
 414#define REASON_PRIVILEGED       0x40000
 415#define REASON_TRAP             0x20000
 416
 417#define single_stepping(regs)   ((regs)->msr & MSR_SE)
 418#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
 419#endif
 420
 421#if defined(CONFIG_4xx)
 422int machine_check_4xx(struct pt_regs *regs)
 423{
 424        unsigned long reason = get_mc_reason(regs);
 425
 426        if (reason & ESR_IMCP) {
 427                printk("Instruction");
 428                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 429        } else
 430                printk("Data");
 431        printk(" machine check in kernel mode.\n");
 432
 433        return 0;
 434}
 435
 436int machine_check_440A(struct pt_regs *regs)
 437{
 438        unsigned long reason = get_mc_reason(regs);
 439
 440        printk("Machine check in kernel mode.\n");
  441        if (reason & ESR_IMCP) {
 442                printk("Instruction Synchronous Machine Check exception\n");
 443                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 444        }
 445        else {
 446                u32 mcsr = mfspr(SPRN_MCSR);
 447                if (mcsr & MCSR_IB)
 448                        printk("Instruction Read PLB Error\n");
 449                if (mcsr & MCSR_DRB)
 450                        printk("Data Read PLB Error\n");
 451                if (mcsr & MCSR_DWB)
 452                        printk("Data Write PLB Error\n");
 453                if (mcsr & MCSR_TLBP)
 454                        printk("TLB Parity Error\n");
  455                if (mcsr & MCSR_ICP) {
 456                        flush_instruction_cache();
 457                        printk("I-Cache Parity Error\n");
 458                }
 459                if (mcsr & MCSR_DCSP)
 460                        printk("D-Cache Search Parity Error\n");
 461                if (mcsr & MCSR_DCFP)
 462                        printk("D-Cache Flush Parity Error\n");
 463                if (mcsr & MCSR_IMPE)
 464                        printk("Machine Check exception is imprecise\n");
 465
 466                /* Clear MCSR */
 467                mtspr(SPRN_MCSR, mcsr);
 468        }
 469        return 0;
 470}
 471
 472int machine_check_47x(struct pt_regs *regs)
 473{
 474        unsigned long reason = get_mc_reason(regs);
 475        u32 mcsr;
 476
 477        printk(KERN_ERR "Machine check in kernel mode.\n");
 478        if (reason & ESR_IMCP) {
 479                printk(KERN_ERR
 480                       "Instruction Synchronous Machine Check exception\n");
 481                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 482                return 0;
 483        }
 484        mcsr = mfspr(SPRN_MCSR);
 485        if (mcsr & MCSR_IB)
 486                printk(KERN_ERR "Instruction Read PLB Error\n");
 487        if (mcsr & MCSR_DRB)
 488                printk(KERN_ERR "Data Read PLB Error\n");
 489        if (mcsr & MCSR_DWB)
 490                printk(KERN_ERR "Data Write PLB Error\n");
 491        if (mcsr & MCSR_TLBP)
 492                printk(KERN_ERR "TLB Parity Error\n");
 493        if (mcsr & MCSR_ICP) {
 494                flush_instruction_cache();
 495                printk(KERN_ERR "I-Cache Parity Error\n");
 496        }
 497        if (mcsr & MCSR_DCSP)
 498                printk(KERN_ERR "D-Cache Search Parity Error\n");
 499        if (mcsr & PPC47x_MCSR_GPR)
 500                printk(KERN_ERR "GPR Parity Error\n");
 501        if (mcsr & PPC47x_MCSR_FPR)
 502                printk(KERN_ERR "FPR Parity Error\n");
 503        if (mcsr & PPC47x_MCSR_IPR)
 504                printk(KERN_ERR "Machine Check exception is imprecise\n");
 505
 506        /* Clear MCSR */
 507        mtspr(SPRN_MCSR, mcsr);
 508
 509        return 0;
 510}
 511#elif defined(CONFIG_E500)
 512int machine_check_e500mc(struct pt_regs *regs)
 513{
 514        unsigned long mcsr = mfspr(SPRN_MCSR);
 515        unsigned long reason = mcsr;
 516        int recoverable = 1;
 517
 518        if (reason & MCSR_LD) {
 519                recoverable = fsl_rio_mcheck_exception(regs);
 520                if (recoverable == 1)
 521                        goto silent_out;
 522        }
 523
 524        printk("Machine check in kernel mode.\n");
 525        printk("Caused by (from MCSR=%lx): ", reason);
 526
 527        if (reason & MCSR_MCP)
 528                printk("Machine Check Signal\n");
 529
 530        if (reason & MCSR_ICPERR) {
 531                printk("Instruction Cache Parity Error\n");
 532
 533                /*
 534                 * This is recoverable by invalidating the i-cache.
 535                 */
 536                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
 537                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
 538                        ;
 539
 540                /*
 541                 * This will generally be accompanied by an instruction
 542                 * fetch error report -- only treat MCSR_IF as fatal
 543                 * if it wasn't due to an L1 parity error.
 544                 */
 545                reason &= ~MCSR_IF;
 546        }
 547
 548        if (reason & MCSR_DCPERR_MC) {
 549                printk("Data Cache Parity Error\n");
 550
 551                /*
 552                 * In write shadow mode we auto-recover from the error, but it
 553                 * may still get logged and cause a machine check.  We should
 554                 * only treat the non-write shadow case as non-recoverable.
 555                 */
 556                if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
 557                        recoverable = 0;
 558        }
 559
 560        if (reason & MCSR_L2MMU_MHIT) {
 561                printk("Hit on multiple TLB entries\n");
 562                recoverable = 0;
 563        }
 564
 565        if (reason & MCSR_NMI)
 566                printk("Non-maskable interrupt\n");
 567
 568        if (reason & MCSR_IF) {
 569                printk("Instruction Fetch Error Report\n");
 570                recoverable = 0;
 571        }
 572
 573        if (reason & MCSR_LD) {
 574                printk("Load Error Report\n");
 575                recoverable = 0;
 576        }
 577
 578        if (reason & MCSR_ST) {
 579                printk("Store Error Report\n");
 580                recoverable = 0;
 581        }
 582
 583        if (reason & MCSR_LDG) {
 584                printk("Guarded Load Error Report\n");
 585                recoverable = 0;
 586        }
 587
 588        if (reason & MCSR_TLBSYNC)
 589                printk("Simultaneous tlbsync operations\n");
 590
 591        if (reason & MCSR_BSL2_ERR) {
 592                printk("Level 2 Cache Error\n");
 593                recoverable = 0;
 594        }
 595
 596        if (reason & MCSR_MAV) {
 597                u64 addr;
 598
 599                addr = mfspr(SPRN_MCAR);
 600                addr |= (u64)mfspr(SPRN_MCARU) << 32;
 601
 602                printk("Machine Check %s Address: %#llx\n",
 603                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
 604        }
 605
 606silent_out:
 607        mtspr(SPRN_MCSR, mcsr);
 608        return mfspr(SPRN_MCSR) == 0 && recoverable;
 609}
 610
 611int machine_check_e500(struct pt_regs *regs)
 612{
 613        unsigned long reason = get_mc_reason(regs);
 614
 615        if (reason & MCSR_BUS_RBERR) {
 616                if (fsl_rio_mcheck_exception(regs))
 617                        return 1;
 618        }
 619
 620        printk("Machine check in kernel mode.\n");
 621        printk("Caused by (from MCSR=%lx): ", reason);
 622
 623        if (reason & MCSR_MCP)
 624                printk("Machine Check Signal\n");
 625        if (reason & MCSR_ICPERR)
 626                printk("Instruction Cache Parity Error\n");
 627        if (reason & MCSR_DCP_PERR)
 628                printk("Data Cache Push Parity Error\n");
 629        if (reason & MCSR_DCPERR)
 630                printk("Data Cache Parity Error\n");
 631        if (reason & MCSR_BUS_IAERR)
 632                printk("Bus - Instruction Address Error\n");
 633        if (reason & MCSR_BUS_RAERR)
 634                printk("Bus - Read Address Error\n");
 635        if (reason & MCSR_BUS_WAERR)
 636                printk("Bus - Write Address Error\n");
 637        if (reason & MCSR_BUS_IBERR)
 638                printk("Bus - Instruction Data Error\n");
 639        if (reason & MCSR_BUS_RBERR)
 640                printk("Bus - Read Data Bus Error\n");
 641        if (reason & MCSR_BUS_WBERR)
  642                printk("Bus - Write Data Bus Error\n");
 643        if (reason & MCSR_BUS_IPERR)
 644                printk("Bus - Instruction Parity Error\n");
 645        if (reason & MCSR_BUS_RPERR)
 646                printk("Bus - Read Parity Error\n");
 647
 648        return 0;
 649}
 650
 651int machine_check_generic(struct pt_regs *regs)
 652{
 653        return 0;
 654}
 655#elif defined(CONFIG_E200)
 656int machine_check_e200(struct pt_regs *regs)
 657{
 658        unsigned long reason = get_mc_reason(regs);
 659
 660        printk("Machine check in kernel mode.\n");
 661        printk("Caused by (from MCSR=%lx): ", reason);
 662
 663        if (reason & MCSR_MCP)
 664                printk("Machine Check Signal\n");
 665        if (reason & MCSR_CP_PERR)
 666                printk("Cache Push Parity Error\n");
 667        if (reason & MCSR_CPERR)
 668                printk("Cache Parity Error\n");
 669        if (reason & MCSR_EXCP_ERR)
 670                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
 671        if (reason & MCSR_BUS_IRERR)
 672                printk("Bus - Read Bus Error on instruction fetch\n");
 673        if (reason & MCSR_BUS_DRERR)
 674                printk("Bus - Read Bus Error on data load\n");
 675        if (reason & MCSR_BUS_WRERR)
 676                printk("Bus - Write Bus Error on buffered store or cache line push\n");
 677
 678        return 0;
 679}
 680#else
 681int machine_check_generic(struct pt_regs *regs)
 682{
 683        unsigned long reason = get_mc_reason(regs);
 684
 685        printk("Machine check in kernel mode.\n");
 686        printk("Caused by (from SRR1=%lx): ", reason);
 687        switch (reason & 0x601F0000) {
 688        case 0x80000:
 689                printk("Machine check signal\n");
 690                break;
 691        case 0:         /* for 601 */
 692        case 0x40000:
 693        case 0x140000:  /* 7450 MSS error and TEA */
 694                printk("Transfer error ack signal\n");
 695                break;
 696        case 0x20000:
 697                printk("Data parity error signal\n");
 698                break;
 699        case 0x10000:
 700                printk("Address parity error signal\n");
 701                break;
 702        case 0x20000000:
 703                printk("L1 Data Cache error\n");
 704                break;
 705        case 0x40000000:
 706                printk("L1 Instruction Cache error\n");
 707                break;
 708        case 0x00100000:
 709                printk("L2 data cache parity error\n");
 710                break;
 711        default:
 712                printk("Unknown values in msr\n");
 713        }
 714        return 0;
 715}
 716#endif /* everything else */
 717
 718void machine_check_exception(struct pt_regs *regs)
 719{
 720        enum ctx_state prev_state = exception_enter();
 721        int recover = 0;
 722
 723        __get_cpu_var(irq_stat).mce_exceptions++;
 724
 725        /* See if any machine dependent calls. In theory, we would want
 726         * to call the CPU first, and call the ppc_md. one if the CPU
 727         * one returns a positive number. However there is existing code
 728         * that assumes the board gets a first chance, so let's keep it
 729         * that way for now and fix things later. --BenH.
 730         */
 731        if (ppc_md.machine_check_exception)
 732                recover = ppc_md.machine_check_exception(regs);
 733        else if (cur_cpu_spec->machine_check)
 734                recover = cur_cpu_spec->machine_check(regs);
 735
 736        if (recover > 0)
 737                goto bail;
 738
 739#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 740        /* the qspan pci read routines can cause machine checks -- Cort
 741         *
 742         * yuck !!! that totally needs to go away ! There are better ways
 743         * to deal with that than having a wart in the mcheck handler.
 744         * -- BenH
 745         */
 746        bad_page_fault(regs, regs->dar, SIGBUS);
 747        goto bail;
 748#endif
 749
 750        if (debugger_fault_handler(regs))
 751                goto bail;
 752
 753        if (check_io_access(regs))
 754                goto bail;
 755
 756        die("Machine check", regs, SIGBUS);
 757
 758        /* Must die if the interrupt is not recoverable */
 759        if (!(regs->msr & MSR_RI))
 760                panic("Unrecoverable Machine check");
 761
 762bail:
 763        exception_exit(prev_state);
 764}
 765
 766void SMIException(struct pt_regs *regs)
 767{
 768        die("System Management Interrupt", regs, SIGABRT);
 769}
 770
 771void handle_hmi_exception(struct pt_regs *regs)
 772{
 773        struct pt_regs *old_regs;
 774
 775        old_regs = set_irq_regs(regs);
 776        irq_enter();
 777
 778        if (ppc_md.handle_hmi_exception)
 779                ppc_md.handle_hmi_exception(regs);
 780
 781        irq_exit();
 782        set_irq_regs(old_regs);
 783}
 784
 785void unknown_exception(struct pt_regs *regs)
 786{
 787        enum ctx_state prev_state = exception_enter();
 788
 789        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
 790               regs->nip, regs->msr, regs->trap);
 791
 792        _exception(SIGTRAP, regs, 0, 0);
 793
 794        exception_exit(prev_state);
 795}
 796
 797void instruction_breakpoint_exception(struct pt_regs *regs)
 798{
 799        enum ctx_state prev_state = exception_enter();
 800
 801        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
 802                                        5, SIGTRAP) == NOTIFY_STOP)
 803                goto bail;
 804        if (debugger_iabr_match(regs))
 805                goto bail;
 806        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 807
 808bail:
 809        exception_exit(prev_state);
 810}
 811
 812void RunModeException(struct pt_regs *regs)
 813{
 814        _exception(SIGTRAP, regs, 0, 0);
 815}
 816
 817void __kprobes single_step_exception(struct pt_regs *regs)
 818{
 819        enum ctx_state prev_state = exception_enter();
 820
 821        clear_single_step(regs);
 822
 823        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 824                                        5, SIGTRAP) == NOTIFY_STOP)
 825                goto bail;
 826        if (debugger_sstep(regs))
 827                goto bail;
 828
 829        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 830
 831bail:
 832        exception_exit(prev_state);
 833}
 834
 835/*
 836 * After we have successfully emulated an instruction, we have to
 837 * check if the instruction was being single-stepped, and if so,
 838 * pretend we got a single-step exception.  This was pointed out
 839 * by Kumar Gala.  -- paulus
 840 */
 841static void emulate_single_step(struct pt_regs *regs)
 842{
 843        if (single_stepping(regs))
 844                single_step_exception(regs);
 845}
 846
 847static inline int __parse_fpscr(unsigned long fpscr)
 848{
 849        int ret = 0;
 850
 851        /* Invalid operation */
 852        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
 853                ret = FPE_FLTINV;
 854
 855        /* Overflow */
 856        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
 857                ret = FPE_FLTOVF;
 858
 859        /* Underflow */
 860        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
 861                ret = FPE_FLTUND;
 862
 863        /* Divide by zero */
 864        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
 865                ret = FPE_FLTDIV;
 866
 867        /* Inexact result */
 868        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
 869                ret = FPE_FLTRES;
 870
 871        return ret;
 872}
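/*
 * Worked example: a floating-point divide by zero taken with traps
 * enabled leaves both FPSCR_ZE (enable) and FPSCR_ZX (sticky status)
 * set, so __parse_fpscr() returns FPE_FLTDIV and the SIGFPE raised
 * below in parse_fpe() carries that si_code to userspace.
 */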
 873
 874static void parse_fpe(struct pt_regs *regs)
 875{
 876        int code = 0;
 877
 878        flush_fp_to_thread(current);
 879
 880        code = __parse_fpscr(current->thread.fp_state.fpscr);
 881
 882        _exception(SIGFPE, regs, code, regs->nip);
 883}
 884
 885/*
 886 * Illegal instruction emulation support.  Originally written to
 887 * provide the PVR to user applications using the mfspr rd, PVR.
 888 * Return non-zero if we can't emulate, or -EFAULT if the associated
 889 * memory access caused an access fault.  Return zero on success.
 890 *
 891 * There are a couple of ways to do this, either "decode" the instruction
 892 * or directly match lots of bits.  In this case, matching lots of
 893 * bits is faster and easier.
  894 */
 896static int emulate_string_inst(struct pt_regs *regs, u32 instword)
 897{
 898        u8 rT = (instword >> 21) & 0x1f;
 899        u8 rA = (instword >> 16) & 0x1f;
 900        u8 NB_RB = (instword >> 11) & 0x1f;
 901        u32 num_bytes;
 902        unsigned long EA;
 903        int pos = 0;
 904
 905        /* Early out if we are an invalid form of lswx */
 906        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
 907                if ((rT == rA) || (rT == NB_RB))
 908                        return -EINVAL;
 909
 910        EA = (rA == 0) ? 0 : regs->gpr[rA];
 911
 912        switch (instword & PPC_INST_STRING_MASK) {
 913                case PPC_INST_LSWX:
 914                case PPC_INST_STSWX:
 915                        EA += NB_RB;
 916                        num_bytes = regs->xer & 0x7f;
 917                        break;
 918                case PPC_INST_LSWI:
 919                case PPC_INST_STSWI:
 920                        num_bytes = (NB_RB == 0) ? 32 : NB_RB;
 921                        break;
 922                default:
 923                        return -EINVAL;
 924        }
 925
  926        while (num_bytes != 0) {
 928                u8 val;
 929                u32 shift = 8 * (3 - (pos & 0x3));
 930
 931                /* if process is 32-bit, clear upper 32 bits of EA */
 932                if ((regs->msr & MSR_64BIT) == 0)
 933                        EA &= 0xFFFFFFFF;
 934
 935                switch ((instword & PPC_INST_STRING_MASK)) {
 936                        case PPC_INST_LSWX:
 937                        case PPC_INST_LSWI:
 938                                if (get_user(val, (u8 __user *)EA))
 939                                        return -EFAULT;
 940                                /* first time updating this reg,
 941                                 * zero it out */
 942                                if (pos == 0)
 943                                        regs->gpr[rT] = 0;
 944                                regs->gpr[rT] |= val << shift;
 945                                break;
 946                        case PPC_INST_STSWI:
 947                        case PPC_INST_STSWX:
 948                                val = regs->gpr[rT] >> shift;
 949                                if (put_user(val, (u8 __user *)EA))
 950                                        return -EFAULT;
 951                                break;
 952                }
 953                /* move EA to next address */
 954                EA += 1;
 955                num_bytes--;
 956
 957                /* manage our position within the register */
 958                if (++pos == 4) {
 959                        pos = 0;
 960                        if (++rT == 32)
 961                                rT = 0;
 962                }
 963        }
 964
 965        return 0;
 966}
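/*
 * Example of the emulation above: "lswi r5,r4,6" loads 6 bytes from
 * the address in r4; the first 4 fill r5 starting at its most
 * significant byte, and the remaining 2 land left-justified in r6,
 * whose other bytes stay zero from the pos == 0 clearing above.
 */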
 967
 968static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
 969{
  970        u32 ra, rs;
 971        unsigned long tmp;
 972
 973        ra = (instword >> 16) & 0x1f;
 974        rs = (instword >> 21) & 0x1f;
 975
 976        tmp = regs->gpr[rs];
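        /*
         * Classic SWAR bit count, kept within byte lanes so the result
         * matches popcntb semantics: each byte of ra receives the
         * number of set bits in the corresponding byte of rs.  First
         * sum bit pairs, then nibbles, then both nibbles of each byte;
         * e.g. a source byte of 0xb4 (four bits set) yields 0x04.
         */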
 977        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
 978        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
 979        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
 980        regs->gpr[ra] = tmp;
 981
 982        return 0;
 983}
 984
 985static int emulate_isel(struct pt_regs *regs, u32 instword)
 986{
 987        u8 rT = (instword >> 21) & 0x1f;
 988        u8 rA = (instword >> 16) & 0x1f;
 989        u8 rB = (instword >> 11) & 0x1f;
 990        u8 BC = (instword >> 6) & 0x1f;
 991        u8 bit;
 992        unsigned long tmp;
 993
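        /*
         * isel rT,rA,rB,BC selects on a single CR bit:
         * rT = CR[BC] ? (rA ? GPR[rA] : 0) : GPR[rB]; rA == 0 reads
         * as the literal value zero rather than GPR[0].
         */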
 994        tmp = (rA == 0) ? 0 : regs->gpr[rA];
 995        bit = (regs->ccr >> (31 - BC)) & 0x1;
 996
 997        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
 998
 999        return 0;
1000}
1001
1002#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1003static inline bool tm_abort_check(struct pt_regs *regs, int cause)
1004{
1005        /* If we're emulating a load/store in an active transaction, we cannot
1006         * emulate it as the kernel operates in transaction suspended context.
1007         * We need to abort the transaction.  This creates a persistent TM
1008         * abort so tell the user what caused it with a new code.
1009         */
1010        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
1011                tm_enable();
1012                tm_abort(cause);
1013                return true;
1014        }
1015        return false;
1016}
1017#else
1018static inline bool tm_abort_check(struct pt_regs *regs, int reason)
1019{
1020        return false;
1021}
1022#endif
1023
1024static int emulate_instruction(struct pt_regs *regs)
1025{
1026        u32 instword;
1027        u32 rd;
1028
1029        if (!user_mode(regs))
1030                return -EINVAL;
1031        CHECK_FULL_REGS(regs);
1032
1033        if (get_user(instword, (u32 __user *)(regs->nip)))
1034                return -EFAULT;
1035
1036        /* Emulate the mfspr rD, PVR. */
1037        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
1038                PPC_WARN_EMULATED(mfpvr, regs);
1039                rd = (instword >> 21) & 0x1f;
1040                regs->gpr[rd] = mfspr(SPRN_PVR);
1041                return 0;
1042        }
1043
1044        /* Emulating the dcba insn is just a no-op.  */
1045        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
1046                PPC_WARN_EMULATED(dcba, regs);
1047                return 0;
1048        }
1049
1050        /* Emulate the mcrxr insn.  */
1051        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
1052                int shift = (instword >> 21) & 0x1c;
1053                unsigned long msk = 0xf0000000UL >> shift;
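                /*
                 * mcrxr crfD copies XER[SO,OV,CA] into CR field crfD
                 * and clears them in the XER; shift is 4*crfD, so msk
                 * selects the destination CR field.
                 */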
1054
1055                PPC_WARN_EMULATED(mcrxr, regs);
1056                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
1057                regs->xer &= ~0xf0000000UL;
1058                return 0;
1059        }
1060
1061        /* Emulate load/store string insn. */
1062        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
1063                if (tm_abort_check(regs,
1064                                   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
1065                        return -EINVAL;
1066                PPC_WARN_EMULATED(string, regs);
1067                return emulate_string_inst(regs, instword);
1068        }
1069
1070        /* Emulate the popcntb (Population Count Bytes) instruction. */
1071        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
1072                PPC_WARN_EMULATED(popcntb, regs);
1073                return emulate_popcntb_inst(regs, instword);
1074        }
1075
1076        /* Emulate isel (Integer Select) instruction */
1077        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
1078                PPC_WARN_EMULATED(isel, regs);
1079                return emulate_isel(regs, instword);
1080        }
1081
1082        /* Emulate sync instruction variants */
1083        if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
1084                PPC_WARN_EMULATED(sync, regs);
1085                asm volatile("sync");
1086                return 0;
1087        }
1088
1089#ifdef CONFIG_PPC64
1090        /* Emulate the mfspr rD, DSCR. */
1091        if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
1092                PPC_INST_MFSPR_DSCR_USER) ||
1093             ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
1094                PPC_INST_MFSPR_DSCR)) &&
1095                        cpu_has_feature(CPU_FTR_DSCR)) {
1096                PPC_WARN_EMULATED(mfdscr, regs);
1097                rd = (instword >> 21) & 0x1f;
1098                regs->gpr[rd] = mfspr(SPRN_DSCR);
1099                return 0;
1100        }
1101        /* Emulate the mtspr DSCR, rD. */
1102        if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
1103                PPC_INST_MTSPR_DSCR_USER) ||
1104             ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
1105                PPC_INST_MTSPR_DSCR)) &&
1106                        cpu_has_feature(CPU_FTR_DSCR)) {
1107                PPC_WARN_EMULATED(mtdscr, regs);
1108                rd = (instword >> 21) & 0x1f;
1109                current->thread.dscr = regs->gpr[rd];
1110                current->thread.dscr_inherit = 1;
1111                mtspr(SPRN_DSCR, current->thread.dscr);
1112                return 0;
1113        }
1114#endif
1115
1116        return -EINVAL;
1117}
1118
1119int is_valid_bugaddr(unsigned long addr)
1120{
1121        return is_kernel_addr(addr);
1122}
1123
1124void __kprobes program_check_exception(struct pt_regs *regs)
1125{
1126        enum ctx_state prev_state = exception_enter();
1127        unsigned int reason = get_reason(regs);
1128        extern int do_mathemu(struct pt_regs *regs);
1129
 1130        /* We can now get here via an FP Unavailable exception if the core
 1131         * has no FPU; in that case the reason flags will be 0. */
1132
1133        if (reason & REASON_FP) {
1134                /* IEEE FP exception */
1135                parse_fpe(regs);
1136                goto bail;
1137        }
1138        if (reason & REASON_TRAP) {
1139                /* Debugger is first in line to stop recursive faults in
1140                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1141                if (debugger_bpt(regs))
1142                        goto bail;
1143
1144                /* trap exception */
1145                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1146                                == NOTIFY_STOP)
1147                        goto bail;
1148
1149                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1150                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1151                        regs->nip += 4;
1152                        goto bail;
1153                }
1154                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1155                goto bail;
1156        }
1157#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1158        if (reason & REASON_TM) {
1159                /* This is a TM "Bad Thing Exception" program check.
1160                 * This occurs when:
1161                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1162                 *    transition in TM states.
1163                 * -  A trechkpt is attempted when transactional.
1164                 * -  A treclaim is attempted when non transactional.
1165                 * -  A tend is illegally attempted.
1166                 * -  writing a TM SPR when transactional.
1167                 */
1168                if (!user_mode(regs) &&
1169                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1170                        regs->nip += 4;
1171                        goto bail;
1172                }
1173                /* If usermode caused this, it's done something illegal and
1174                 * gets a SIGILL slap on the wrist.  We call it an illegal
1175                 * operand to distinguish from the instruction just being bad
1176                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1177                 * illegal /placement/ of a valid instruction.
1178                 */
1179                if (user_mode(regs)) {
1180                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1181                        goto bail;
1182                } else {
1183                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1184                               "at %lx (msr 0x%x)\n", regs->nip, reason);
1185                        die("Unrecoverable exception", regs, SIGABRT);
1186                }
1187        }
1188#endif
1189
1190        /*
1191         * If we took the program check in the kernel skip down to sending a
1192         * SIGILL. The subsequent cases all relate to emulating instructions
1193         * which we should only do for userspace. We also do not want to enable
1194         * interrupts for kernel faults because that might lead to further
 1195         * faults, and lose the context of the original exception.
1196         */
1197        if (!user_mode(regs))
1198                goto sigill;
1199
1200        /* We restore the interrupt state now */
1201        if (!arch_irq_disabled_regs(regs))
1202                local_irq_enable();
1203
1204#ifdef CONFIG_MATH_EMULATION
1205        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1206         * but there seems to be a hardware bug on the 405GP (RevD)
1207         * that means ESR is sometimes set incorrectly - either to
1208         * ESR_DST (!?) or 0.  In the process of chasing this with the
1209         * hardware people - not sure if it can happen on any illegal
1210         * instruction or only on FP instructions, whether there is a
1211         * pattern to occurrences etc. -dgibson 31/Mar/2003
1212         */
1213        switch (do_mathemu(regs)) {
1214        case 0:
1215                emulate_single_step(regs);
1216                goto bail;
1217        case 1: {
1218                        int code = 0;
1219                        code = __parse_fpscr(current->thread.fp_state.fpscr);
1220                        _exception(SIGFPE, regs, code, regs->nip);
1221                        goto bail;
1222                }
1223        case -EFAULT:
1224                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1225                goto bail;
1226        }
1227        /* fall through on any other errors */
1228#endif /* CONFIG_MATH_EMULATION */
1229
1230        /* Try to emulate it if we should. */
1231        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1232                switch (emulate_instruction(regs)) {
1233                case 0:
1234                        regs->nip += 4;
1235                        emulate_single_step(regs);
1236                        goto bail;
1237                case -EFAULT:
1238                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1239                        goto bail;
1240                }
1241        }
1242
1243sigill:
1244        if (reason & REASON_PRIVILEGED)
1245                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1246        else
1247                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1248
1249bail:
1250        exception_exit(prev_state);
1251}
1252
1253/*
1254 * This occurs when running in hypervisor mode on POWER6 or later
1255 * and an illegal instruction is encountered.
1256 */
1257void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
1258{
1259        regs->msr |= REASON_ILLEGAL;
1260        program_check_exception(regs);
1261}
1262
1263void alignment_exception(struct pt_regs *regs)
1264{
1265        enum ctx_state prev_state = exception_enter();
1266        int sig, code, fixed = 0;
1267
1268        /* We restore the interrupt state now */
1269        if (!arch_irq_disabled_regs(regs))
1270                local_irq_enable();
1271
1272        if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1273                goto bail;
1274
1275        /* we don't implement logging of alignment exceptions */
1276        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1277                fixed = fix_alignment(regs);
1278
1279        if (fixed == 1) {
1280                regs->nip += 4; /* skip over emulated instruction */
1281                emulate_single_step(regs);
1282                goto bail;
1283        }
1284
1285        /* Operand address was bad */
1286        if (fixed == -EFAULT) {
1287                sig = SIGSEGV;
1288                code = SEGV_ACCERR;
1289        } else {
1290                sig = SIGBUS;
1291                code = BUS_ADRALN;
1292        }
1293        if (user_mode(regs))
1294                _exception(sig, regs, code, regs->dar);
1295        else
1296                bad_page_fault(regs, regs->dar, sig);
1297
1298bail:
1299        exception_exit(prev_state);
1300}
1301
1302void StackOverflow(struct pt_regs *regs)
1303{
1304        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1305               current, regs->gpr[1]);
1306        debugger(regs);
1307        show_regs(regs);
1308        panic("kernel stack overflow");
1309}
1310
1311void nonrecoverable_exception(struct pt_regs *regs)
1312{
1313        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1314               regs->nip, regs->msr);
1315        debugger(regs);
1316        die("nonrecoverable exception", regs, SIGKILL);
1317}
1318
1319void trace_syscall(struct pt_regs *regs)
1320{
1321        printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
1322               current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
 1323               regs->ccr & 0x10000000 ? "Error=" : "", regs->gpr[3], print_tainted());
1324}
1325
1326void kernel_fp_unavailable_exception(struct pt_regs *regs)
1327{
1328        enum ctx_state prev_state = exception_enter();
1329
1330        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1331                          "%lx at %lx\n", regs->trap, regs->nip);
1332        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1333
1334        exception_exit(prev_state);
1335}
1336
1337void altivec_unavailable_exception(struct pt_regs *regs)
1338{
1339        enum ctx_state prev_state = exception_enter();
1340
1341        if (user_mode(regs)) {
1342                /* A user program has executed an altivec instruction,
1343                   but this kernel doesn't support altivec. */
1344                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1345                goto bail;
1346        }
1347
1348        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1349                        "%lx at %lx\n", regs->trap, regs->nip);
1350        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1351
1352bail:
1353        exception_exit(prev_state);
1354}
1355
1356void vsx_unavailable_exception(struct pt_regs *regs)
1357{
1358        if (user_mode(regs)) {
 1359                /* A user program has executed a VSX instruction,
 1360                   but this kernel doesn't support VSX. */
1361                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1362                return;
1363        }
1364
1365        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1366                        "%lx at %lx\n", regs->trap, regs->nip);
1367        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1368}
1369
1370#ifdef CONFIG_PPC64
1371static void tm_unavailable(struct pt_regs *regs)
1372{
1373        pr_emerg("Unrecoverable TM Unavailable Exception "
1374                        "%lx at %lx\n", regs->trap, regs->nip);
1375        die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1376}
1377
1378void facility_unavailable_exception(struct pt_regs *regs)
1379{
1380        static char *facility_strings[] = {
1381                [FSCR_FP_LG] = "FPU",
1382                [FSCR_VECVSX_LG] = "VMX/VSX",
1383                [FSCR_DSCR_LG] = "DSCR",
1384                [FSCR_PM_LG] = "PMU SPRs",
1385                [FSCR_BHRB_LG] = "BHRB",
1386                [FSCR_TM_LG] = "TM",
1387                [FSCR_EBB_LG] = "EBB",
1388                [FSCR_TAR_LG] = "TAR",
1389        };
1390        char *facility = "unknown";
1391        u64 value;
1392        u32 instword, rd;
1393        u8 status;
1394        bool hv;
1395
1396        hv = (regs->trap == 0xf80);
1397        if (hv)
1398                value = mfspr(SPRN_HFSCR);
1399        else
1400                value = mfspr(SPRN_FSCR);
1401
1402        status = value >> 56;
1403        if (status == FSCR_DSCR_LG) {
1404                /*
1405                 * User is accessing the DSCR register using the problem
1406                 * state only SPR number (0x03) either through a mfspr or
1407                 * a mtspr instruction. If it is a write attempt through
1408                 * a mtspr, then we set the inherit bit. This also allows
1409                 * the user to write or read the register directly in the
1410                 * future by setting via the FSCR DSCR bit. But in case it
1411                 * is a read DSCR attempt through a mfspr instruction, we
1412                 * just emulate the instruction instead. This code path will
 1413                 * always emulate all the mfspr instructions until the user
 1414                 * has attempted at least one mtspr instruction. This way it
1415                 * preserves the same behaviour when the user is accessing
1416                 * the DSCR through privilege level only SPR number (0x11)
1417                 * which is emulated through illegal instruction exception.
1418                 * We always leave HFSCR DSCR set.
1419                 */
1420                if (get_user(instword, (u32 __user *)(regs->nip))) {
1421                        pr_err("Failed to fetch the user instruction\n");
1422                        return;
1423                }
1424
1425                /* Write into DSCR (mtspr 0x03, RS) */
1426                if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1427                                == PPC_INST_MTSPR_DSCR_USER) {
1428                        rd = (instword >> 21) & 0x1f;
1429                        current->thread.dscr = regs->gpr[rd];
1430                        current->thread.dscr_inherit = 1;
1431                        mtspr(SPRN_FSCR, value | FSCR_DSCR);
1432                }
1433
1434                /* Read from DSCR (mfspr RT, 0x03) */
1435                if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1436                                == PPC_INST_MFSPR_DSCR_USER) {
1437                        if (emulate_instruction(regs)) {
1438                                pr_err("DSCR based mfspr emulation failed\n");
1439                                return;
1440                        }
1441                        regs->nip += 4;
1442                        emulate_single_step(regs);
1443                }
1444                return;
1445        }
1446
1447        if (status == FSCR_TM_LG) {
1448                /*
1449                 * If we're here then the hardware is TM aware because it
 1450                 * generated an exception with FSCR_TM set.
1451                 *
1452                 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1453                 * told us not to do TM, or the kernel is not built with TM
1454                 * support.
1455                 *
1456                 * If both of those things are true, then userspace can spam the
1457                 * console by triggering the printk() below just by continually
1458                 * doing tbegin (or any TM instruction). So in that case just
1459                 * send the process a SIGILL immediately.
1460                 */
1461                if (!cpu_has_feature(CPU_FTR_TM))
1462                        goto out;
1463
1464                tm_unavailable(regs);
1465                return;
1466        }
1467
1468        if ((status < ARRAY_SIZE(facility_strings)) &&
1469            facility_strings[status])
1470                facility = facility_strings[status];
1471
1472        /* We restore the interrupt state now */
1473        if (!arch_irq_disabled_regs(regs))
1474                local_irq_enable();
1475
1476        pr_err_ratelimited(
1477                "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
1478                hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
1479
1480out:
1481        if (user_mode(regs)) {
1482                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1483                return;
1484        }
1485
1486        die("Unexpected facility unavailable exception", regs, SIGABRT);
1487}
1488#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 * If VMX is in use, the VRs now hold checkpointed values,
	 * so we don't want to load the VRs from the thread_struct.
	 */
	tm_recheckpoint(&current->thread, MSR_FP);

	/* If VMX is in use, get the transactional values back */
	if (regs->msr & MSR_VEC) {
		do_load_up_transact_altivec(&current->thread);
		/* At this point all the VSX state is loaded, so enable it */
		regs->msr |= MSR_VSX;
	}
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, MSR_VEC);
	current->thread.used_vr = 1;

	if (regs->msr & MSR_FP) {
		do_load_up_transact_fpu(&current->thread);
		regs->msr |= MSR_VSX;
	}
}

void vsx_unavailable_tm(struct pt_regs *regs)
{
	unsigned long orig_msr = regs->msr;

	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* If FP and VMX are already loaded, we have all the state we need */
	if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
		regs->msr |= MSR_VSX;
		return;
	}

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;

	/* This loads & recheckpoints FP and VRs; but we have
	 * to be sure not to overwrite previously-valid state.
	 */
	tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

	if (orig_msr & MSR_FP)
		do_load_up_transact_fpu(&current->thread);
	if (orig_msr & MSR_VEC)
		do_load_up_transact_altivec(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
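
/*
 * Worked example (illustrative sketch, not part of this file): the mask
 * handed to tm_recheckpoint() in vsx_unavailable_tm() is
 * regs->msr & ~orig_msr, i.e. only the facilities that were just turned on.
 * Compilable as a standalone userspace demo; the DEMO_MSR_* values below
 * are local stand-ins, not the kernel's definitions.
 */
#include <stdio.h>

#define DEMO_MSR_FP	0x0000000000002000UL
#define DEMO_MSR_VSX	0x0000000000800000UL
#define DEMO_MSR_VEC	0x0000000002000000UL

int main(void)
{
	unsigned long orig_msr = DEMO_MSR_FP;	/* FP was already live */
	unsigned long new_msr = orig_msr | DEMO_MSR_FP | DEMO_MSR_VEC |
				DEMO_MSR_VSX;

	/* FP drops out of the mask, so its live state is not reloaded */
	printf("recheckpoint mask = %#lx (VEC|VSX only)\n",
	       new_msr & ~orig_msr);
	return 0;
}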

void performance_monitor_exception(struct pt_regs *regs)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1: {
		int code = __parse_fpscr(current->thread.fpscr.val);

		_exception(SIGFPE, regs, code, regs->nip);
		return;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
		return;
	case 1:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
#endif /* CONFIG_8xx */
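
/*
 * For reference, the return-code contract SoftwareEmulation() assumes of
 * the two emulators above (as exercised by the switch statements):
 *
 *	 0	instruction emulated; deliver any pending single-step trap
 *	 1	math emu: FP exception raised -> SIGFPE with a cause decoded
 *		from the FPSCR; minimal 8xx emu: not handled -> SIGILL
 *	-EFAULT	the instruction word itself could not be fetched -> SIGSEGV
 *	other	unrecognized instruction -> SIGILL
 */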

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT.
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
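
/*
 * Timeline of the branch-taken trick above (illustrative): a debugger arms
 * DBCR0_BT; userspace executes a taken branch; BookE raises the event with
 * nip still at the branch.  DebugException() swaps BT for IC (instruction
 * complete) and resumes, so the branch target instruction completes and the
 * DBSR_IC path then delivers SIGTRAP with nip at the target, matching the
 * server stop-on-target behaviour the comment describes.
 */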

#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
			   unsigned long error_code)
{
	/* We treat cache locking instructions from the user as privileged
	 * ops; in the future we could try to do something smarter.
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_FSL_BOOKE */
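
/*
 * Hypothetical userspace snippet (not part of this file) that would land in
 * CacheLockingException() on an e500 core: dcbtls (data cache block touch
 * and lock set) executed in problem state sets ESR_DLK, so the process gets
 * SIGILL/ILL_PRVOPC.  Instruction availability is core-specific; this is a
 * sketch, not a portable test.
 */
static inline void demo_cache_lock_attempt(void *p)
{
	__asm__ volatile("dcbtls 0, 0, %0" : : "r"(p) : "memory");
}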

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
	extern int do_spe_mathemu(struct pt_regs *regs);
	unsigned long spefscr;
	int fpexc_mode;
	int code = 0;
	int err;

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}
}
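
/*
 * Illustrative userspace sketch (not part of this file): the fpexc_mode
 * bits consulted above are set per task with prctl(PR_SET_FPEXC).  The
 * helper name below is hypothetical; it asks for SIGFPE on overflow and
 * divide-by-zero via the software (emulated) FP exception mode.
 */
#include <sys/prctl.h>

static inline int demo_enable_spe_fp_signals(void)
{
	return prctl(PR_SET_FPEXC,
		     PR_FP_EXC_SW_ENABLE | PR_FP_EXC_OVF | PR_FP_EXC_DIV);
}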

void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	/* The round exception is raised after the offending instruction has
	 * completed, so nip points at the next instruction; back it up so
	 * speround_handler() can decode the instruction that trapped.
	 */
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
	}
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/SRR1 still held live state
 * which taking this exception has overwritten, i.e. we have lost state.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
	       regs->trap, regs->nip);
	die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception,
 * spins until a reboot occurs
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
	/* Generic WatchdogHandler, implement your own */
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
}

void WatchdogException(struct pt_regs *regs)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
#endif
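
/*
 * Illustrative sketch (not part of this file): because WatchdogHandler()
 * above is weak, a platform file can override it with a strong definition.
 * A hypothetical board that forces an immediate reset instead of merely
 * masking further watchdog interrupts might do:
 *
 *	void WatchdogHandler(struct pt_regs *regs)
 *	{
 *		pr_emerg("watchdog fired, resetting\n");
 *		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_RST_SYSTEM);
 *	}
 */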

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated) / sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
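
/*
 * Usage note (not part of this file): with debugfs mounted, the counters
 * created above appear as one u32 file per instruction type, typically at
 * /sys/kernel/debug/powerpc/emulated_instructions/<type>, alongside
 * do_warn.  Writing a non-zero value to do_warn sets ppc_warn_emulated,
 * which enables the ratelimited "used emulated ... instruction" message
 * printed by ppc_warn_emulated_print().
 */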