/* linux/arch/powerpc/kernel/traps.c */
   1/*
   2 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
   3 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
   4 *
   5 *  This program is free software; you can redistribute it and/or
   6 *  modify it under the terms of the GNU General Public License
   7 *  as published by the Free Software Foundation; either version
   8 *  2 of the License, or (at your option) any later version.
   9 *
  10 *  Modified by Cort Dougan (cort@cs.nmt.edu)
  11 *  and Paul Mackerras (paulus@samba.org)
  12 */
  13
  14/*
  15 * This file handles the architecture-dependent parts of hardware exceptions
  16 */
  17
  18#include <linux/errno.h>
  19#include <linux/sched.h>
  20#include <linux/kernel.h>
  21#include <linux/mm.h>
  22#include <linux/stddef.h>
  23#include <linux/unistd.h>
  24#include <linux/ptrace.h>
  25#include <linux/user.h>
  26#include <linux/interrupt.h>
  27#include <linux/init.h>
  28#include <linux/module.h>
  29#include <linux/prctl.h>
  30#include <linux/delay.h>
  31#include <linux/kprobes.h>
  32#include <linux/kexec.h>
  33#include <linux/backlight.h>
  34#include <linux/bug.h>
  35#include <linux/kdebug.h>
  36#include <linux/debugfs.h>
  37#include <linux/ratelimit.h>
  38#include <linux/context_tracking.h>
  39
  40#include <asm/emulated_ops.h>
  41#include <asm/pgtable.h>
  42#include <asm/uaccess.h>
  43#include <asm/io.h>
  44#include <asm/machdep.h>
  45#include <asm/rtas.h>
  46#include <asm/pmc.h>
  47#ifdef CONFIG_PPC32
  48#include <asm/reg.h>
  49#endif
  50#ifdef CONFIG_PMAC_BACKLIGHT
  51#include <asm/backlight.h>
  52#endif
  53#ifdef CONFIG_PPC64
  54#include <asm/firmware.h>
  55#include <asm/processor.h>
  56#include <asm/tm.h>
  57#endif
  58#include <asm/kexec.h>
  59#include <asm/ppc-opcode.h>
  60#include <asm/rio.h>
  61#include <asm/fadump.h>
  62#include <asm/switch_to.h>
  63#include <asm/tm.h>
  64#include <asm/debug.h>
  65
#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
/*
 * Hooks an in-kernel debugger (e.g. xmon or kgdb) installs to get first
 * crack at the corresponding exception class.  Each returns non-zero
 * when it has fully handled the event; NULL when no debugger is
 * registered.  Exported so debugger modules can set them.
 */
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif
  90
  91/*
  92 * Trap & Exception support
  93 */
  94
#ifdef CONFIG_PMAC_BACKLIGHT
/*
 * Turn the PowerMac backlight up to full brightness so that an oops is
 * visible even if the screen was blanked.  Called from oops_begin() on
 * powermac machines.  Safe no-op if no backlight device is registered.
 */
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif
 112
/* Serialises oops output across CPUs; managed by oops_begin()/oops_end(). */
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;              /* CPU holding die_lock, -1 if none */
static unsigned int die_nest_count;     /* nesting depth of oopses on the owner CPU */
static int die_counter;                 /* total oops count, shown in the banner */
 117
/*
 * Start oops processing: give a registered debugger first chance, then
 * take the die lock with interrupts disabled so output from several
 * oopsing CPUs does not interleave.  Re-entry on the owning CPU (a
 * nested oops) is allowed without re-acquiring the lock.
 *
 * Returns the saved irq flags to hand to oops_end(), or 1 if the
 * debugger handled the fault entirely.
 */
static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
        int cpu;
        unsigned long flags;

        if (debugger(regs))
                return 1;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        /* make sure the oops is visible on powermac laptop panels */
        if (machine_is(powermac))
                pmac_backlight_unblank();
        return flags;
}
 145
/*
 * Finish oops processing started by oops_begin(): drop the die lock
 * once nesting unwinds, hand the regs to fadump/kexec crash paths, and
 * finally panic or kill the current task depending on context.
 *
 * @flags: irq flags returned by oops_begin()
 * @signr: signal to exit with; 0 means the caller recovers and we return
 */
static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
                               int signr)
{
        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        oops_exit();
        printk("\n");
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);

        crash_fadump(regs, "die oops");

        /*
         * A system reset (0x100) is a request to dump, so we always send
         * it through the crashdump code.
         */
        if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
                crash_kexec(regs);

                /*
                 * We aren't the primary crash CPU. We need to send it
                 * to a holding pattern to avoid it ending up in the panic
                 * code.
                 */
                crash_kexec_secondary(regs);
        }

        if (!signr)
                return;

        /*
         * While our oops output is serialised by a spinlock, output
         * from panic() called below can race and corrupt it. If we
         * know we are going to panic, delay for 1 second so we have a
         * chance to get clean backtraces from all CPUs that are oopsing.
         */
        if (in_interrupt() || panic_on_oops || !current->pid ||
            is_global_init(current)) {
                mdelay(MSEC_PER_SEC);
        }

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}
 197
/*
 * Print the oops banner (config flags, machine name), run the DIE_OOPS
 * notifier chain, and dump modules and registers.
 *
 * Returns 1 if a notifier handled the event (caller should not signal),
 * 0 otherwise.
 */
static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
        printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_NUMA
        printk("NUMA ");
#endif
        printk("%s\n", ppc_md.name ? ppc_md.name : "");

        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);

        return 0;
}
 223
/*
 * Kill the kernel (or current task) with an oops report.
 * If a notifier consumed the oops, err is forced to 0 so oops_end()
 * returns instead of exiting the task.
 */
void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin(regs);

        if (__die(str, regs, err))
                err = 0;
        oops_end(flags, regs, err);
}
 232
/*
 * Fill in the siginfo for a SIGTRAP/TRAP_TRACE delivered after a user
 * single-step, with si_addr set to the faulting NIP.  The memset clears
 * the whole structure (including padding) before the fields are set.
 */
void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = TRAP_TRACE;
        info->si_addr = (void __user *)regs->nip;
}
 241
/*
 * Deliver signal @signr with code @code and address @addr to current in
 * response to an exception.  If the exception happened in kernel mode
 * we die() instead.  Optionally logs unhandled signals (ratelimited),
 * choosing the 32- or 64-bit format by the MSR_64BIT bit in regs->msr.
 */
void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                die("Exception in kernel mode", regs, signr);
                return;
        }

        if (show_unhandled_signals && unhandled_signal(current, signr)) {
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, signr,
                                   addr, regs->nip, regs->link, code);
        }

        /* re-enable interrupts if they were on before the exception */
        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
                local_irq_enable();

        current->thread.trap_nr = code;
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
}
 271
#ifdef CONFIG_PPC64
/*
 * 0x100 System Reset exception handler.  The platform gets first chance
 * via ppc_md.system_reset_exception(); if it recovers, we return.
 * Otherwise we die(), and if the interrupt is unrecoverable (MSR_RI
 * clear) we must panic rather than attempt to continue.
 */
void system_reset_exception(struct pt_regs *regs)
{
        /* See if any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        return;
        }

        die("System Reset", regs, SIGABRT);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        /* What should we do here? We could issue a shutdown or hard reset. */
}
#endif
 290
/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 *
 * Returns 1 if the machine check was recognised as a failed I/O port
 * access and recovered (NIP redirected to the fixup), 0 otherwise.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

        /* note: assignment to entry inside the condition is intentional */
        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == 0x60000000)         /* nop */
                        nip -= 2;
                else if (*nip == 0x4c00012c)    /* isync */
                        --nip;
                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
                        /* sync or twi */
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100)? "OUT to": "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
                        /* mark recoverable and branch to the fixup code */
                        regs->msr |= MSR_RI;
                        regs->nip = entry->fixup;
                        return 1;
                }
        }
#endif /* CONFIG_PPC32 */
        return 0;
}
 339
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)        ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)     ((regs)->dsisr)
#else
/* Freescale BookE reports machine-check causes in MCSR instead. */
#define get_mc_reason(regs)     (mfspr(SPRN_MCSR))
#endif
#define REASON_FP               ESR_FP
#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED       ESR_PPR
#define REASON_TRAP             ESR_PTR

/* single-step stuff */
#define single_stepping(regs)   (current->thread.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)        ((regs)->msr)
#define get_mc_reason(regs)     ((regs)->msr)
#define REASON_TM               0x200000
#define REASON_FP               0x100000
#define REASON_ILLEGAL          0x80000
#define REASON_PRIVILEGED       0x40000
#define REASON_TRAP             0x20000

#define single_stepping(regs)   ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif
 372
#if defined(CONFIG_4xx)
/*
 * 4xx machine check handler: report whether the check was on an
 * instruction or data access, clearing the instruction-machine-check
 * bit in the ESR once reported.  Always returns 0 (not recovered).
 */
int machine_check_4xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & ESR_IMCP) {
                printk("Instruction");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else
                printk("Data");
        printk(" machine check in kernel mode.\n");

        return 0;
}
 387
/*
 * 440A machine check handler: decode and report each error bit in MCSR
 * (PLB read/write errors, TLB/cache parity errors, etc.).  An i-cache
 * parity error is handled by flushing the instruction cache.  The MCSR
 * is written back to clear the reported causes.  Always returns 0.
 */
int machine_check_440A(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        if (reason & ESR_IMCP){
                printk("Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        }
        else {
                u32 mcsr = mfspr(SPRN_MCSR);
                if (mcsr & MCSR_IB)
                        printk("Instruction Read PLB Error\n");
                if (mcsr & MCSR_DRB)
                        printk("Data Read PLB Error\n");
                if (mcsr & MCSR_DWB)
                        printk("Data Write PLB Error\n");
                if (mcsr & MCSR_TLBP)
                        printk("TLB Parity Error\n");
                if (mcsr & MCSR_ICP){
                        flush_instruction_cache();
                        printk("I-Cache Parity Error\n");
                }
                if (mcsr & MCSR_DCSP)
                        printk("D-Cache Search Parity Error\n");
                if (mcsr & MCSR_DCFP)
                        printk("D-Cache Flush Parity Error\n");
                if (mcsr & MCSR_IMPE)
                        printk("Machine Check exception is imprecise\n");

                /* Clear MCSR */
                mtspr(SPRN_MCSR, mcsr);
        }
        return 0;
}
 423
/*
 * 476 (47x) machine check handler: like 440A but with the 47x-specific
 * GPR/FPR parity and imprecise-check bits.  An i-cache parity error is
 * handled by flushing the instruction cache; MCSR is written back to
 * clear the reported causes.  Always returns 0 (not recovered).
 */
int machine_check_47x(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);
        u32 mcsr;

        printk(KERN_ERR "Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk(KERN_ERR
                       "Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
                return 0;
        }
        mcsr = mfspr(SPRN_MCSR);
        if (mcsr & MCSR_IB)
                printk(KERN_ERR "Instruction Read PLB Error\n");
        if (mcsr & MCSR_DRB)
                printk(KERN_ERR "Data Read PLB Error\n");
        if (mcsr & MCSR_DWB)
                printk(KERN_ERR "Data Write PLB Error\n");
        if (mcsr & MCSR_TLBP)
                printk(KERN_ERR "TLB Parity Error\n");
        if (mcsr & MCSR_ICP) {
                flush_instruction_cache();
                printk(KERN_ERR "I-Cache Parity Error\n");
        }
        if (mcsr & MCSR_DCSP)
                printk(KERN_ERR "D-Cache Search Parity Error\n");
        if (mcsr & PPC47x_MCSR_GPR)
                printk(KERN_ERR "GPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_FPR)
                printk(KERN_ERR "FPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_IPR)
                printk(KERN_ERR "Machine Check exception is imprecise\n");

        /* Clear MCSR */
        mtspr(SPRN_MCSR, mcsr);

        return 0;
}
#elif defined(CONFIG_E500)
/*
 * e500mc machine check handler.  Decodes MCSR, attempts recovery for
 * i-cache parity errors (invalidate the i-cache) and write-shadow
 * d-cache parity errors, and gives the RapidIO layer first chance at
 * load errors.  Returns non-zero only when the event was fully
 * recovered: no fatal cause seen and MCSR reads back as zero after the
 * handled bits are written back.
 */
int machine_check_e500mc(struct pt_regs *regs)
{
        unsigned long mcsr = mfspr(SPRN_MCSR);
        unsigned long reason = mcsr;
        int recoverable = 1;

        if (reason & MCSR_LD) {
                /* load errors may come from RapidIO; let it try first */
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");

        if (reason & MCSR_ICPERR) {
                printk("Instruction Cache Parity Error\n");

                /*
                 * This is recoverable by invalidating the i-cache.
                 */
                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
                        ;

                /*
                 * This will generally be accompanied by an instruction
                 * fetch error report -- only treat MCSR_IF as fatal
                 * if it wasn't due to an L1 parity error.
                 */
                reason &= ~MCSR_IF;
        }

        if (reason & MCSR_DCPERR_MC) {
                printk("Data Cache Parity Error\n");

                /*
                 * In write shadow mode we auto-recover from the error, but it
                 * may still get logged and cause a machine check.  We should
                 * only treat the non-write shadow case as non-recoverable.
                 */
                if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
                        recoverable = 0;
        }

        if (reason & MCSR_L2MMU_MHIT) {
                printk("Hit on multiple TLB entries\n");
                recoverable = 0;
        }

        if (reason & MCSR_NMI)
                printk("Non-maskable interrupt\n");

        if (reason & MCSR_IF) {
                printk("Instruction Fetch Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LD) {
                printk("Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_ST) {
                printk("Store Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LDG) {
                printk("Guarded Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_TLBSYNC)
                printk("Simultaneous tlbsync operations\n");

        if (reason & MCSR_BSL2_ERR) {
                printk("Level 2 Cache Error\n");
                recoverable = 0;
        }

        if (reason & MCSR_MAV) {
                u64 addr;

                addr = mfspr(SPRN_MCAR);
                addr |= (u64)mfspr(SPRN_MCARU) << 32;

                printk("Machine Check %s Address: %#llx\n",
                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
        }

silent_out:
        /* write the observed value back to acknowledge the handled bits */
        mtspr(SPRN_MCSR, mcsr);
        return mfspr(SPRN_MCSR) == 0 && recoverable;
}
 562
 563int machine_check_e500(struct pt_regs *regs)
 564{
 565        unsigned long reason = get_mc_reason(regs);
 566
 567        if (reason & MCSR_BUS_RBERR) {
 568                if (fsl_rio_mcheck_exception(regs))
 569                        return 1;
 570        }
 571
 572        printk("Machine check in kernel mode.\n");
 573        printk("Caused by (from MCSR=%lx): ", reason);
 574
 575        if (reason & MCSR_MCP)
 576                printk("Machine Check Signal\n");
 577        if (reason & MCSR_ICPERR)
 578                printk("Instruction Cache Parity Error\n");
 579        if (reason & MCSR_DCP_PERR)
 580                printk("Data Cache Push Parity Error\n");
 581        if (reason & MCSR_DCPERR)
 582                printk("Data Cache Parity Error\n");
 583        if (reason & MCSR_BUS_IAERR)
 584                printk("Bus - Instruction Address Error\n");
 585        if (reason & MCSR_BUS_RAERR)
 586                printk("Bus - Read Address Error\n");
 587        if (reason & MCSR_BUS_WAERR)
 588                printk("Bus - Write Address Error\n");
 589        if (reason & MCSR_BUS_IBERR)
 590                printk("Bus - Instruction Data Error\n");
 591        if (reason & MCSR_BUS_RBERR)
 592                printk("Bus - Read Data Bus Error\n");
 593        if (reason & MCSR_BUS_WBERR)
 594                printk("Bus - Read Data Bus Error\n");
 595        if (reason & MCSR_BUS_IPERR)
 596                printk("Bus - Instruction Parity Error\n");
 597        if (reason & MCSR_BUS_RPERR)
 598                printk("Bus - Read Parity Error\n");
 599
 600        return 0;
 601}
 602
/*
 * Fallback for E500 configs with no specific handler: report nothing
 * and return 0 ("not recovered") so machine_check_exception() proceeds
 * to its generic handling.
 */
int machine_check_generic(struct pt_regs *regs)
{
        return 0;
}
#elif defined(CONFIG_E200)
/*
 * e200 machine check handler: decode and report each MCSR error bit.
 * Purely diagnostic; always returns 0 (not recovered).
 */
int machine_check_e200(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_CP_PERR)
                printk("Cache Push Parity Error\n");
        if (reason & MCSR_CPERR)
                printk("Cache Parity Error\n");
        if (reason & MCSR_EXCP_ERR)
                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
        if (reason & MCSR_BUS_IRERR)
                printk("Bus - Read Bus Error on instruction fetch\n");
        if (reason & MCSR_BUS_DRERR)
                printk("Bus - Read Bus Error on data load\n");
        if (reason & MCSR_BUS_WRERR)
                printk("Bus - Write Bus Error on buffered store or cache line push\n");

        return 0;
}
#else
/*
 * Classic (6xx/7xx/7450-style) machine check handler: decode the cause
 * from the SRR1 bits saved in regs->msr.  Purely diagnostic; always
 * returns 0 (not recovered).
 */
int machine_check_generic(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from SRR1=%lx): ", reason);
        switch (reason & 0x601F0000) {
        case 0x80000:
                printk("Machine check signal\n");
                break;
        case 0:         /* for 601 */
        case 0x40000:
        case 0x140000:  /* 7450 MSS error and TEA */
                printk("Transfer error ack signal\n");
                break;
        case 0x20000:
                printk("Data parity error signal\n");
                break;
        case 0x10000:
                printk("Address parity error signal\n");
                break;
        case 0x20000000:
                printk("L1 Data Cache error\n");
                break;
        case 0x40000000:
                printk("L1 Instruction Cache error\n");
                break;
        case 0x00100000:
                printk("L2 data cache parity error\n");
                break;
        default:
                printk("Unknown values in msr\n");
        }
        return 0;
}
#endif /* everything else */
 669
/*
 * Common machine check entry point.  Dispatches to the platform
 * (ppc_md) or CPU-specific handler; a positive return from either means
 * the event was recovered and we bail out.  Otherwise fall through to
 * the debugger, the I/O-access fixup, and finally die(), panicking if
 * the interrupt is not recoverable (MSR_RI clear).
 */
void machine_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int recover = 0;

        __get_cpu_var(irq_stat).mce_exceptions++;

        /* See if any machine dependent calls. In theory, we would want
         * to call the CPU first, and call the ppc_md. one if the CPU
         * one returns a positive number. However there is existing code
         * that assumes the board gets a first chance, so let's keep it
         * that way for now and fix things later. --BenH.
         */
        if (ppc_md.machine_check_exception)
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)
                recover = cur_cpu_spec->machine_check(regs);

        if (recover > 0)
                goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        goto bail;
#endif

        if (debugger_fault_handler(regs))
                goto bail;

        if (check_io_access(regs))
                goto bail;

        die("Machine check", regs, SIGBUS);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");

bail:
        exception_exit(prev_state);
}
 717
/* System Management Interrupt: no recovery path, just die(). */
void SMIException(struct pt_regs *regs)
{
        die("System Management Interrupt", regs, SIGABRT);
}
 722
/*
 * Catch-all for exception vectors with no dedicated handler: log the
 * trap location/vector and deliver SIGTRAP to the offending context.
 */
void unknown_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);

        _exception(SIGTRAP, regs, 0, 0);

        exception_exit(prev_state);
}
 734
/*
 * Instruction address breakpoint (IABR) match: offer the event to the
 * die-notifier chain and the debugger before delivering SIGTRAP with
 * TRAP_BRKPT at the breakpoint address.
 */
void instruction_breakpoint_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_iabr_match(regs))
                goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
        exception_exit(prev_state);
}
 749
/* Run-mode exception: treated as a plain SIGTRAP with no si_code/addr. */
void RunModeException(struct pt_regs *regs)
{
        _exception(SIGTRAP, regs, 0, 0);
}
 754
/*
 * Single-step trap: clear the single-step state first (so we don't
 * re-trap), then offer the event to the die-notifier chain and the
 * debugger before delivering SIGTRAP/TRAP_TRACE at the NIP.
 */
void __kprobes single_step_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        clear_single_step(regs);

        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_sstep(regs))
                goto bail;

        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
        exception_exit(prev_state);
}
 772
/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
        if (single_stepping(regs))
                single_step_exception(regs);
}
 784
 785static inline int __parse_fpscr(unsigned long fpscr)
 786{
 787        int ret = 0;
 788
 789        /* Invalid operation */
 790        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
 791                ret = FPE_FLTINV;
 792
 793        /* Overflow */
 794        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
 795                ret = FPE_FLTOVF;
 796
 797        /* Underflow */
 798        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
 799                ret = FPE_FLTUND;
 800
 801        /* Divide by zero */
 802        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
 803                ret = FPE_FLTDIV;
 804
 805        /* Inexact result */
 806        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
 807                ret = FPE_FLTRES;
 808
 809        return ret;
 810}
 811
/*
 * Deliver SIGFPE for a floating-point exception: flush the FP state to
 * the thread struct, derive the si_code from the saved FPSCR, and
 * signal the current task at the faulting NIP.
 */
static void parse_fpe(struct pt_regs *regs)
{
        int code = 0;

        flush_fp_to_thread(current);

        code = __parse_fpscr(current->thread.fpscr.val);

        _exception(SIGFPE, regs, code, regs->nip);
}
 822
/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */

/*
 * Emulate the lswx/lswi/stswx/stswi string instructions one byte at a
 * time through {get,put}_user.  Bytes are packed big-endian into the
 * target register (highest byte first); the register number wraps from
 * 31 back to 0 as the PowerPC string instructions specify.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 NB_RB = (instword >> 11) & 0x1f;
        u32 num_bytes;
        unsigned long EA;
        int pos = 0;            /* byte position within the current register */

        /* Early out if we are an invalid form of lswx */
        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
                if ((rT == rA) || (rT == NB_RB))
                        return -EINVAL;

        EA = (rA == 0) ? 0 : regs->gpr[rA];

        switch (instword & PPC_INST_STRING_MASK) {
                case PPC_INST_LSWX:
                case PPC_INST_STSWX:
                        EA += NB_RB;
                        /* X-form: byte count comes from XER[25:31] */
                        num_bytes = regs->xer & 0x7f;
                        break;
                case PPC_INST_LSWI:
                case PPC_INST_STSWI:
                        /* immediate form: NB of 0 means 32 bytes */
                        num_bytes = (NB_RB == 0) ? 32 : NB_RB;
                        break;
                default:
                        return -EINVAL;
        }

        while (num_bytes != 0)
        {
                u8 val;
                u32 shift = 8 * (3 - (pos & 0x3));

                switch ((instword & PPC_INST_STRING_MASK)) {
                        case PPC_INST_LSWX:
                        case PPC_INST_LSWI:
                                if (get_user(val, (u8 __user *)EA))
                                        return -EFAULT;
                                /* first time updating this reg,
                                 * zero it out */
                                if (pos == 0)
                                        regs->gpr[rT] = 0;
                                regs->gpr[rT] |= val << shift;
                                break;
                        case PPC_INST_STSWI:
                        case PPC_INST_STSWX:
                                val = regs->gpr[rT] >> shift;
                                if (put_user(val, (u8 __user *)EA))
                                        return -EFAULT;
                                break;
                }
                /* move EA to next address */
                EA += 1;
                num_bytes--;

                /* manage our position within the register */
                if (++pos == 4) {
                        pos = 0;
                        if (++rT == 32)
                                rT = 0;
                }
        }

        return 0;
}
 901
 902static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
 903{
 904        u32 ra,rs;
 905        unsigned long tmp;
 906
 907        ra = (instword >> 16) & 0x1f;
 908        rs = (instword >> 21) & 0x1f;
 909
 910        tmp = regs->gpr[rs];
 911        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
 912        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
 913        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
 914        regs->gpr[ra] = tmp;
 915
 916        return 0;
 917}
 918
 919static int emulate_isel(struct pt_regs *regs, u32 instword)
 920{
 921        u8 rT = (instword >> 21) & 0x1f;
 922        u8 rA = (instword >> 16) & 0x1f;
 923        u8 rB = (instword >> 11) & 0x1f;
 924        u8 BC = (instword >> 6) & 0x1f;
 925        u8 bit;
 926        unsigned long tmp;
 927
 928        tmp = (rA == 0) ? 0 : regs->gpr[rA];
 929        bit = (regs->ccr >> (31 - BC)) & 0x1;
 930
 931        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
 932
 933        return 0;
 934}
 935
 936#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 937static inline bool tm_abort_check(struct pt_regs *regs, int cause)
 938{
 939        /* If we're emulating a load/store in an active transaction, we cannot
 940         * emulate it as the kernel operates in transaction suspended context.
 941         * We need to abort the transaction.  This creates a persistent TM
 942         * abort so tell the user what caused it with a new code.
 943         */
 944        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
 945                tm_enable();
 946                tm_abort(cause);
 947                return true;
 948        }
 949        return false;
 950}
 951#else
 952static inline bool tm_abort_check(struct pt_regs *regs, int reason)
 953{
 954        return false;
 955}
 956#endif
 957
/*
 * Try to emulate the instruction that caused a program check at
 * regs->nip for a big-endian user process.
 *
 * Returns 0 if the instruction was emulated (the caller advances nip),
 * -EFAULT if the instruction or its operands could not be accessed,
 * and -EINVAL for anything we do not emulate.
 */
static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	/* Only user-mode, big-endian faults are candidates for emulation. */
	if (!user_mode(regs) || (regs->msr & MSR_LE))
		return -EINVAL;
	CHECK_FULL_REGS(regs);

	/* Fetch the faulting instruction from user space. */
	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op.  */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn.  */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;	/* 4 * CR field number */
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		/* Copy XER[0:3] into the selected CR field, then clear them. */
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		/* Loads/stores cannot be emulated inside an active
		 * transaction; abort it first. */
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR (both user and privileged encodings),
	 * only on CPUs that have the DSCR facility. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		/* Record that this task set DSCR explicitly. */
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}
1045
/* BUG()/WARN() trap addresses are only valid inside the kernel image. */
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
1050
1051void __kprobes program_check_exception(struct pt_regs *regs)
1052{
1053        enum ctx_state prev_state = exception_enter();
1054        unsigned int reason = get_reason(regs);
1055        extern int do_mathemu(struct pt_regs *regs);
1056
1057        /* We can now get here via a FP Unavailable exception if the core
1058         * has no FPU, in that case the reason flags will be 0 */
1059
1060        if (reason & REASON_FP) {
1061                /* IEEE FP exception */
1062                parse_fpe(regs);
1063                goto bail;
1064        }
1065        if (reason & REASON_TRAP) {
1066                /* Debugger is first in line to stop recursive faults in
1067                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1068                if (debugger_bpt(regs))
1069                        goto bail;
1070
1071                /* trap exception */
1072                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1073                                == NOTIFY_STOP)
1074                        goto bail;
1075
1076                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
1077                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1078                        regs->nip += 4;
1079                        goto bail;
1080                }
1081                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1082                goto bail;
1083        }
1084#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1085        if (reason & REASON_TM) {
1086                /* This is a TM "Bad Thing Exception" program check.
1087                 * This occurs when:
1088                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
1089                 *    transition in TM states.
1090                 * -  A trechkpt is attempted when transactional.
1091                 * -  A treclaim is attempted when non transactional.
1092                 * -  A tend is illegally attempted.
1093                 * -  writing a TM SPR when transactional.
1094                 */
1095                if (!user_mode(regs) &&
1096                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1097                        regs->nip += 4;
1098                        goto bail;
1099                }
1100                /* If usermode caused this, it's done something illegal and
1101                 * gets a SIGILL slap on the wrist.  We call it an illegal
1102                 * operand to distinguish from the instruction just being bad
1103                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1104                 * illegal /placement/ of a valid instruction.
1105                 */
1106                if (user_mode(regs)) {
1107                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1108                        goto bail;
1109                } else {
1110                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1111                               "at %lx (msr 0x%x)\n", regs->nip, reason);
1112                        die("Unrecoverable exception", regs, SIGABRT);
1113                }
1114        }
1115#endif
1116
1117        /* We restore the interrupt state now */
1118        if (!arch_irq_disabled_regs(regs))
1119                local_irq_enable();
1120
1121#ifdef CONFIG_MATH_EMULATION
1122        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1123         * but there seems to be a hardware bug on the 405GP (RevD)
1124         * that means ESR is sometimes set incorrectly - either to
1125         * ESR_DST (!?) or 0.  In the process of chasing this with the
1126         * hardware people - not sure if it can happen on any illegal
1127         * instruction or only on FP instructions, whether there is a
1128         * pattern to occurrences etc. -dgibson 31/Mar/2003 */
1129        switch (do_mathemu(regs)) {
1130        case 0:
1131                emulate_single_step(regs);
1132                goto bail;
1133        case 1: {
1134                        int code = 0;
1135                        code = __parse_fpscr(current->thread.fpscr.val);
1136                        _exception(SIGFPE, regs, code, regs->nip);
1137                        goto bail;
1138                }
1139        case -EFAULT:
1140                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1141                goto bail;
1142        }
1143        /* fall through on any other errors */
1144#endif /* CONFIG_MATH_EMULATION */
1145
1146        /* Try to emulate it if we should. */
1147        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1148                switch (emulate_instruction(regs)) {
1149                case 0:
1150                        regs->nip += 4;
1151                        emulate_single_step(regs);
1152                        goto bail;
1153                case -EFAULT:
1154                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1155                        goto bail;
1156                }
1157        }
1158
1159        if (reason & REASON_PRIVILEGED)
1160                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1161        else
1162                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1163
1164bail:
1165        exception_exit(prev_state);
1166}
1167
1168/*
1169 * This occurs when running in hypervisor mode on POWER6 or later
1170 * and an illegal instruction is encountered.
1171 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
	/* Mark the fault as an illegal instruction and reuse the program
	 * check path, which may be able to emulate it. */
	regs->msr |= REASON_ILLEGAL;
	program_check_exception(regs);
}
1177
/*
 * Handle an alignment exception: try to fix up / emulate the unaligned
 * access, otherwise deliver SIGBUS/SIGSEGV to a user task or take the
 * bad-page-fault path for the kernel.
 */
void alignment_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	int sig, code, fixed = 0;

	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* Cannot emulate a load/store inside an active transaction. */
	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		goto bail;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		regs->nip += 4;	/* skip over emulated instruction */
		emulate_single_step(regs);
		goto bail;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, regs->dar, sig);

bail:
	exception_exit(prev_state);
}
1216
/* Fatal: kernel stack overflow detected.  Log, give the debugger a
 * chance, dump registers, then panic. */
void StackOverflow(struct pt_regs *regs)
{
	printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
	       current, regs->gpr[1]);
	debugger(regs);
	show_regs(regs);
	panic("kernel stack overflow");
}
1225
/* An exception we cannot recover from: log it, drop to the debugger
 * if present, and die. */
void nonrecoverable_exception(struct pt_regs *regs)
{
	printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
	       regs->nip, regs->msr);
	debugger(regs);
	die("nonrecoverable exception", regs, SIGKILL);
}
1233
1234void trace_syscall(struct pt_regs *regs)
1235{
1236        printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
1237               current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
1238               regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
1239}
1240
/* FP Unavailable exception taken in kernel mode: unrecoverable, so
 * report and die. */
void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

	/* Only reached if die() returns. */
	exception_exit(prev_state);
}
1251
1252void altivec_unavailable_exception(struct pt_regs *regs)
1253{
1254        enum ctx_state prev_state = exception_enter();
1255
1256        if (user_mode(regs)) {
1257                /* A user program has executed an altivec instruction,
1258                   but this kernel doesn't support altivec. */
1259                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1260                goto bail;
1261        }
1262
1263        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1264                        "%lx at %lx\n", regs->trap, regs->nip);
1265        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1266
1267bail:
1268        exception_exit(prev_state);
1269}
1270
1271void vsx_unavailable_exception(struct pt_regs *regs)
1272{
1273        if (user_mode(regs)) {
1274                /* A user program has executed an vsx instruction,
1275                   but this kernel doesn't support vsx. */
1276                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1277                return;
1278        }
1279
1280        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1281                        "%lx at %lx\n", regs->trap, regs->nip);
1282        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1283}
1284
/*
 * TM Unavailable exception: never expected here, so log it and kill
 * the offending task (SIGILL) or the kernel (die).
 */
void tm_unavailable_exception(struct pt_regs *regs)
{
	/* We restore the interrupt state now */
	if (!arch_irq_disabled_regs(regs))
		local_irq_enable();

	/* Currently we never expect a TMU exception.  Catch
	 * this and kill the process!
	 */
	printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
	       "(msr %lx)\n",
	       regs->nip, regs->msr);

	if (user_mode(regs)) {
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	die("Unexpected TM unavailable exception", regs, SIGABRT);
}
1305
1306#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1307
1308extern void do_load_up_fpu(struct pt_regs *regs);
1309
/*
 * FP Unavailable exception taken while the task is transactional:
 * reclaim the transaction, enable FP in the MSR, and recheckpoint so
 * the checkpointed FP registers are loaded.
 */
void fp_unavailable_tm(struct pt_regs *regs)
{
	/* Note:  This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	/* Reclaim didn't save out any FPRs to transact_fprs. */

	/* Enable FP for the task: */
	regs->msr |= (MSR_FP | current->thread.fpexc_mode);

	/* This loads and recheckpoints the FP registers from
	 * thread.fpr[].  They will remain in registers after the
	 * checkpoint so we don't need to reload them after.
	 */
	tm_recheckpoint(&current->thread, regs->msr);
}
1338
1339#ifdef CONFIG_ALTIVEC
1340extern void do_load_up_altivec(struct pt_regs *regs);
1341
/*
 * Altivec Unavailable exception while transactional: reclaim, enable
 * MSR_VEC, recheckpoint, and mark the vector unit as used.
 */
void altivec_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_enable();
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);
	regs->msr |= MSR_VEC;
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vr = 1;
}
1358#endif
1359
1360#ifdef CONFIG_VSX
/*
 * VSX Unavailable exception while transactional: reclaim, enable FP,
 * VEC and VSX in the MSR, recheckpoint, and mark VSX as used.
 */
void vsx_unavailable_tm(struct pt_regs *regs)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
		 "MSR=%lx\n",
		 regs->nip, regs->msr);

	tm_enable();
	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim(&current->thread, current->thread.regs->msr,
		   TM_CAUSE_FAC_UNAV);

	regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
		MSR_VSX;
	/* This loads & recheckpoints FP and VRs. */
	tm_recheckpoint(&current->thread, regs->msr);
	current->thread.used_vsr = 1;
}
1385#endif
1386#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1387
/* PMU interrupt: account it per-CPU and hand off to the registered
 * perf interrupt handler. */
void performance_monitor_exception(struct pt_regs *regs)
{
	__get_cpu_var(irq_stat).pmu_irqs++;

	perf_irq(regs);
}
1394
1395#ifdef CONFIG_8xx
/*
 * 8xx software FPU emulation trap.  Depending on configuration, runs
 * the full math emulator, the minimal 8xx emulator, or just delivers
 * SIGILL.  Kernel-mode FP use is fatal.
 */
void SoftwareEmulation(struct pt_regs *regs)
{
	extern int do_mathemu(struct pt_regs *);
	extern int Soft_emulate_8xx(struct pt_regs *);
#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
	int errcode;
#endif

	CHECK_FULL_REGS(regs);

	if (!user_mode(regs)) {
		debugger(regs);
		die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
	}

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (errcode) {
	case 0:		/* emulated successfully */
		emulate_single_step(regs);
		return;
	case 1: {	/* emulated; raise SIGFPE with FPSCR-derived code */
			int code = 0;
			code = __parse_fpscr(current->thread.fpscr.val);
			_exception(SIGFPE, regs, code, regs->nip);
			return;
		}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	default:
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx, regs);

	switch (errcode) {
	case 0:		/* emulated successfully */
		emulate_single_step(regs);
		return;
	case 1:		/* instruction not handled by the minimal emulator */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return;
	}
#else
	/* No emulator configured at all. */
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
#endif
}
1454#endif /* CONFIG_8xx */
1455
1456#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Decode a BookE debug event (data/instruction address compare hits),
 * clear the corresponding enable bits in the thread's debug control
 * registers, and deliver a TRAP_HWBKPT identifying which breakpoint
 * slot fired.
 */
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler. Torez
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
			     5);
		changed |= 0x01;
	}  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
			     6);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC1) {
		current->thread.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
			     1);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC2) {
		current->thread.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
			     2);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC3) {
		current->thread.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
			     3);
		changed |= 0x01;
	}  else if (debug_status & DBSR_IAC4) {
		current->thread.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
			     4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
		regs->msr |= MSR_DE;
	else
		/* Make sure the IDM flag is off */
		current->thread.dbcr0 &= ~DBCR0_IDM;

	/* Propagate any DBCR0 change made above to the hardware. */
	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.dbcr0);
}
1514
/*
 * BookE Debug exception entry: handles Branch Taken and Instruction
 * Complete (single-step) events inline, and defers all other debug
 * events to handle_debug().
 */
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	/* Record the raw status for ptrace consumers. */
	current->thread.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) { 	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
			/* Re-enable MSR_DE only if other debug events are
			 * still armed; otherwise clear the IDM flag too. */
			if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
					       current->thread.dbcr1))
				regs->msr |= MSR_DE;
			else
				/* Make sure the IDM bit is off */
				current->thread.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
1576#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1577
#if !defined(CONFIG_TAU_INT)
/* Fallback thermal-assist (TAU) interrupt handler used when no TAU
 * interrupt driver is configured: just log the event. */
void TAUException(struct pt_regs *regs)
{
	printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
	       regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */
1585
1586#ifdef CONFIG_ALTIVEC
/*
 * Altivec assist exception: fatal in kernel mode; for user tasks,
 * attempt software emulation of the instruction, delivering SIGSEGV
 * if it cannot be read, or setting the VSCR non-Java bit and
 * continuing if it is not recognized.
 */
void altivec_assist_exception(struct pt_regs *regs)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	/* Make sure the thread struct holds the live altivec state. */
	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vscr.u[3] |= 0x10000;
	}
}
1618#endif /* CONFIG_ALTIVEC */
1619
1620#ifdef CONFIG_VSX
/*
 * VSX assist exception: fatal in kernel mode; no emulation is
 * implemented, so user tasks get SIGILL.
 */
void vsx_assist_exception(struct pt_regs *regs)
{
	if (!user_mode(regs)) {
		printk(KERN_EMERG "VSX assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VSX assist exception", regs, SIGILL);
	}

	/* Make sure the thread struct holds the live VSX state. */
	flush_vsx_to_thread(current);
	printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
1633#endif /* CONFIG_VSX */
1634
1635#ifdef CONFIG_FSL_BOOKE
1636void CacheLockingException(struct pt_regs *regs, unsigned long address,
1637                           unsigned long error_code)
1638{
1639        /* We treat cache locking instructions from the user
1640         * as priv ops, in the future we could try to do
1641         * something smarter
1642         */
1643        if (error_code & (ESR_DLK|ESR_ILK))
1644                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1645        return;
1646}
1647#endif /* CONFIG_FSL_BOOKE */
1648
1649#ifdef CONFIG_SPE
1650void SPEFloatingPointException(struct pt_regs *regs)
1651{
1652        extern int do_spe_mathemu(struct pt_regs *regs);
1653        unsigned long spefscr;
1654        int fpexc_mode;
1655        int code = 0;
1656        int err;
1657
1658        flush_spe_to_thread(current);
1659
1660        spefscr = current->thread.spefscr;
1661        fpexc_mode = current->thread.fpexc_mode;
1662
1663        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1664                code = FPE_FLTOVF;
1665        }
1666        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1667                code = FPE_FLTUND;
1668        }
1669        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1670                code = FPE_FLTDIV;
1671        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1672                code = FPE_FLTINV;
1673        }
1674        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1675                code = FPE_FLTRES;
1676
1677        err = do_spe_mathemu(regs);
1678        if (err == 0) {
1679                regs->nip += 4;         /* skip emulated instruction */
1680                emulate_single_step(regs);
1681                return;
1682        }
1683
1684        if (err == -EFAULT) {
1685                /* got an error reading the instruction */
1686                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1687        } else if (err == -EINVAL) {
1688                /* didn't recognize the instruction */
1689                printk(KERN_ERR "unrecognized spe instruction "
1690                       "in %s at %lx\n", current->comm, regs->nip);
1691        } else {
1692                _exception(SIGFPE, regs, code, regs->nip);
1693        }
1694
1695        return;
1696}
1697
/*
 * SPE floating-point round exception: give up the SPE unit, step back
 * to the instruction, and let the software rounding handler finish it.
 */
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
	extern int speround_handler(struct pt_regs *regs);
	int err;

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

	/* NOTE(review): presumably NIP points past the instruction that
	 * raised the round exception here — back up so the handler sees
	 * it; confirm against the exception entry code. */
	regs->nip -= 4;
	err = speround_handler(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, 0, regs->nip);
		return;
	}
}
1728#endif
1729
1730/*
1731 * We enter here if we get an unrecoverable exception, that is, one
1732 * that happened at a point where the RI (recoverable interrupt) bit
1733 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1734 * we therefore lost state by taking this exception.
1735 */
1736void unrecoverable_exception(struct pt_regs *regs)
1737{
1738        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1739               regs->trap, regs->nip);
1740        die("Unrecoverable exception", regs, SIGABRT);
1741}
1742
1743#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
1744/*
1745 * Default handler for a Watchdog exception,
1746 * spins until a reboot occurs
1747 */
1748void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1749{
1750        /* Generic WatchdogHandler, implement your own */
1751        mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1752        return;
1753}
1754
void WatchdogException(struct pt_regs *regs)
{
	/* Log the event, then defer to the (possibly overridden) handler. */
	pr_emerg("PowerPC Book-E Watchdog Exception\n");
	WatchdogHandler(regs);
}
1760#endif
1761
1762/*
1763 * We enter here if we discover during exception entry that we are
1764 * running in supervisor mode with a userspace value in the stack pointer.
1765 */
1766void kernel_bad_stack(struct pt_regs *regs)
1767{
1768        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1769               regs->gpr[1], regs->nip);
1770        die("Bad kernel stack pointer", regs, SIGABRT);
1771}
1772
void __init trap_init(void)
{
	/* Intentionally empty — presumably powerpc trap vectors are set up
	 * elsewhere (early asm / head code); confirm against boot path. */
}
1776
1777
1778#ifdef CONFIG_PPC_EMULATED_STATS
1779
/* Expands to a designated initializer that names one emulation counter
 * after its field, e.g. .dcbz = { .name = "dcbz" }. */
#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

/*
 * Per-instruction-class counters for software-emulated instructions,
 * exposed through debugfs by ppc_warn_emulated_init() below.  Entries
 * exist only for the features this kernel is configured with.
 */
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
#endif
};
1810
1811u32 ppc_warn_emulated;
1812
1813void ppc_warn_emulated_print(const char *type)
1814{
1815        pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
1816                            type);
1817}
1818
1819static int __init ppc_warn_emulated_init(void)
1820{
1821        struct dentry *dir, *d;
1822        unsigned int i;
1823        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1824
1825        if (!powerpc_debugfs_root)
1826                return -ENODEV;
1827
1828        dir = debugfs_create_dir("emulated_instructions",
1829                                 powerpc_debugfs_root);
1830        if (!dir)
1831                return -ENOMEM;
1832
1833        d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1834                               &ppc_warn_emulated);
1835        if (!d)
1836                goto fail;
1837
1838        for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1839                d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1840                                       (u32 *)&entries[i].val.counter);
1841                if (!d)
1842                        goto fail;
1843        }
1844
1845        return 0;
1846
1847fail:
1848        debugfs_remove_recursive(dir);
1849        return -ENOMEM;
1850}
1851
1852device_initcall(ppc_warn_emulated_init);
1853
1854#endif /* CONFIG_PPC_EMULATED_STATS */
1855