linux/arch/powerpc/kernel/traps.c
/*
 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 *  Modified by Cort Dougan (cort@cs.nmt.edu)
 *  and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>       /* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#include <asm/tm.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
        mutex_lock(&pmac_backlight_mutex);
        if (pmac_backlight) {
                struct backlight_properties *props;

                props = &pmac_backlight->props;
                props->brightness = props->max_brightness;
                props->power = FB_BLANK_UNBLANK;
                backlight_update_status(pmac_backlight);
        }
        mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

static unsigned __kprobes long oops_begin(struct pt_regs *regs)
{
        int cpu;
        unsigned long flags;

        if (debugger(regs))
                return 1;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        if (machine_is(powermac))
                pmac_backlight_unblank();
        return flags;
}

static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
                               int signr)
{
        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        oops_exit();
        printk("\n");
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);

        crash_fadump(regs, "die oops");

        /*
         * A system reset (0x100) is a request to dump, so we always send
         * it through the crashdump code.
         */
        if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
                crash_kexec(regs);

                /*
                 * We aren't the primary crash CPU. We need to send it
                 * to a holding pattern to avoid it ending up in the panic
                 * code.
                 */
                crash_kexec_secondary(regs);
        }

        if (!signr)
                return;

        /*
         * While our oops output is serialised by a spinlock, output
         * from panic() called below can race and corrupt it. If we
         * know we are going to panic, delay for 1 second so we have a
         * chance to get clean backtraces from all CPUs that are oopsing.
         */
        if (in_interrupt() || panic_on_oops || !current->pid ||
            is_global_init(current)) {
                mdelay(MSEC_PER_SEC);
        }

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        do_exit(signr);
}

static int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
        printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
        printk("SMP NR_CPUS=%d ", NR_CPUS);
#endif
        if (debug_pagealloc_enabled())
                printk("DEBUG_PAGEALLOC ");
#ifdef CONFIG_NUMA
        printk("NUMA ");
#endif
        printk("%s\n", ppc_md.name ? ppc_md.name : "");

        if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
                return 1;

        print_modules();
        show_regs(regs);

        return 0;
}

void die(const char *str, struct pt_regs *regs, long err)
{
        unsigned long flags = oops_begin(regs);

        if (__die(str, regs, err))
                err = 0;
        oops_end(flags, regs, err);
}

void user_single_step_siginfo(struct task_struct *tsk,
                                struct pt_regs *regs, siginfo_t *info)
{
        memset(info, 0, sizeof(*info));
        info->si_signo = SIGTRAP;
        info->si_code = TRAP_TRACE;
        info->si_addr = (void __user *)regs->nip;
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
        siginfo_t info;
        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %08lx nip %08lx lr %08lx code %x\n";
        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
                        "at %016lx nip %016lx lr %016lx code %x\n";

        if (!user_mode(regs)) {
                die("Exception in kernel mode", regs, signr);
                return;
        }

        if (show_unhandled_signals && unhandled_signal(current, signr)) {
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, signr,
                                   addr, regs->nip, regs->link, code);
        }

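        /*
         * Re-enable interrupts if the interrupted context had them
         * enabled but they are disabled right now.
         */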
        if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
                local_irq_enable();

        current->thread.trap_nr = code;
        memset(&info, 0, sizeof(info));
        info.si_signo = signr;
        info.si_code = code;
        info.si_addr = (void __user *) addr;
        force_sig_info(signr, &info, current);
}

#ifdef CONFIG_PPC64
void system_reset_exception(struct pt_regs *regs)
{
        /* See if there are any machine dependent calls */
        if (ppc_md.system_reset_exception) {
                if (ppc_md.system_reset_exception(regs))
                        return;
        }

        die("System Reset", regs, SIGABRT);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable System Reset");

        /* What should we do here? We could issue a shutdown or hard reset. */
}

/*
 * This function is called in real mode. Strictly no printk's please.
 *
 * regs->nip and regs->msr contain srr0 and srr1.
 */
long machine_check_early(struct pt_regs *regs)
{
        long handled = 0;

        __this_cpu_inc(irq_stat.mce_exceptions);

        add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

        if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
                handled = cur_cpu_spec->machine_check_early(regs);
        return handled;
}

long hmi_exception_realmode(struct pt_regs *regs)
{
        __this_cpu_inc(irq_stat.hmi_exceptions);

        wait_for_subcore_guest_exit();

        if (ppc_md.hmi_exception_early)
                ppc_md.hmi_exception_early(regs);

        wait_for_tb_resync();

        return 0;
}

#endif

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * Note that the 601 only takes a machine check on TEA
 * (transfer error ack) signal assertion, and does not
 * set any of the top 16 bits of SRR1.
 *  -- paulus.
 */
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
        unsigned long msr = regs->msr;
        const struct exception_table_entry *entry;
        unsigned int *nip = (unsigned int *)regs->nip;

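        /*
         * Reason bits in SRR1 are either all clear (the 601 TEA case
         * noted above) or flag a machine-check/TEA signal; either way
         * the faulting address must have an exception-table fixup.
         */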
        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
            && (entry = search_exception_tables(regs->nip)) != NULL) {
                /*
                 * Check that it's a sync instruction, or somewhere
                 * in the twi; isync; nop sequence that inb/inw/inl uses.
                 * As the address is in the exception table
                 * we should be able to read the instr there.
                 * For the debug message, we look at the preceding
                 * load or store.
                 */
                if (*nip == 0x60000000)         /* nop */
                        nip -= 2;
                else if (*nip == 0x4c00012c)    /* isync */
                        --nip;
                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
                        /* sync or twi */
                        unsigned int rb;

                        --nip;
                        rb = (*nip >> 11) & 0x1f;
                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
                               (*nip & 0x100)? "OUT to": "IN from",
                               regs->gpr[rb] - _IO_BASE, nip);
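                        /* Mark the exception recoverable and resume at
                         * the fixup address from the exception table. */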
                        regs->msr |= MSR_RI;
                        regs->nip = entry->fixup;
                        return 1;
                }
        }
#endif /* CONFIG_PPC32 */
        return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)        ((regs)->dsisr)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs)     ((regs)->dsisr)
#else
#define get_mc_reason(regs)     (mfspr(SPRN_MCSR))
#endif
#define REASON_FP               ESR_FP
#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED       ESR_PPR
#define REASON_TRAP             ESR_PTR

/* single-step stuff */
#define single_stepping(regs)   (current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)

#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)        ((regs)->msr)
#define get_mc_reason(regs)     ((regs)->msr)
#define REASON_TM               0x200000
#define REASON_FP               0x100000
#define REASON_ILLEGAL          0x80000
#define REASON_PRIVILEGED       0x40000
#define REASON_TRAP             0x20000

#define single_stepping(regs)   ((regs)->msr & MSR_SE)
#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
#endif

#if defined(CONFIG_4xx)
int machine_check_4xx(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & ESR_IMCP) {
                printk("Instruction");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else
                printk("Data");
        printk(" machine check in kernel mode.\n");

        return 0;
}

int machine_check_440A(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk("Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
        } else {
                u32 mcsr = mfspr(SPRN_MCSR);
                if (mcsr & MCSR_IB)
                        printk("Instruction Read PLB Error\n");
                if (mcsr & MCSR_DRB)
                        printk("Data Read PLB Error\n");
                if (mcsr & MCSR_DWB)
                        printk("Data Write PLB Error\n");
                if (mcsr & MCSR_TLBP)
                        printk("TLB Parity Error\n");
                if (mcsr & MCSR_ICP) {
                        flush_instruction_cache();
                        printk("I-Cache Parity Error\n");
                }
                if (mcsr & MCSR_DCSP)
                        printk("D-Cache Search Parity Error\n");
                if (mcsr & MCSR_DCFP)
                        printk("D-Cache Flush Parity Error\n");
                if (mcsr & MCSR_IMPE)
                        printk("Machine Check exception is imprecise\n");

                /* Clear MCSR */
                mtspr(SPRN_MCSR, mcsr);
        }
        return 0;
}

int machine_check_47x(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);
        u32 mcsr;

        printk(KERN_ERR "Machine check in kernel mode.\n");
        if (reason & ESR_IMCP) {
                printk(KERN_ERR
                       "Instruction Synchronous Machine Check exception\n");
                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
                return 0;
        }
        mcsr = mfspr(SPRN_MCSR);
        if (mcsr & MCSR_IB)
                printk(KERN_ERR "Instruction Read PLB Error\n");
        if (mcsr & MCSR_DRB)
                printk(KERN_ERR "Data Read PLB Error\n");
        if (mcsr & MCSR_DWB)
                printk(KERN_ERR "Data Write PLB Error\n");
        if (mcsr & MCSR_TLBP)
                printk(KERN_ERR "TLB Parity Error\n");
        if (mcsr & MCSR_ICP) {
                flush_instruction_cache();
                printk(KERN_ERR "I-Cache Parity Error\n");
        }
        if (mcsr & MCSR_DCSP)
                printk(KERN_ERR "D-Cache Search Parity Error\n");
        if (mcsr & PPC47x_MCSR_GPR)
                printk(KERN_ERR "GPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_FPR)
                printk(KERN_ERR "FPR Parity Error\n");
        if (mcsr & PPC47x_MCSR_IPR)
                printk(KERN_ERR "Machine Check exception is imprecise\n");

        /* Clear MCSR */
        mtspr(SPRN_MCSR, mcsr);

        return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
        unsigned long mcsr = mfspr(SPRN_MCSR);
        unsigned long reason = mcsr;
        int recoverable = 1;

        if (reason & MCSR_LD) {
                recoverable = fsl_rio_mcheck_exception(regs);
                if (recoverable == 1)
                        goto silent_out;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");

        if (reason & MCSR_ICPERR) {
                printk("Instruction Cache Parity Error\n");

                /*
                 * This is recoverable by invalidating the i-cache.
                 */
                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
                        ;

                /*
                 * This will generally be accompanied by an instruction
                 * fetch error report -- only treat MCSR_IF as fatal
                 * if it wasn't due to an L1 parity error.
                 */
                reason &= ~MCSR_IF;
        }

        if (reason & MCSR_DCPERR_MC) {
                printk("Data Cache Parity Error\n");

                /*
                 * In write shadow mode we auto-recover from the error, but it
                 * may still get logged and cause a machine check.  We should
                 * only treat the non-write shadow case as non-recoverable.
                 */
                if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
                        recoverable = 0;
        }

        if (reason & MCSR_L2MMU_MHIT) {
                printk("Hit on multiple TLB entries\n");
                recoverable = 0;
        }

        if (reason & MCSR_NMI)
                printk("Non-maskable interrupt\n");

        if (reason & MCSR_IF) {
                printk("Instruction Fetch Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LD) {
                printk("Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_ST) {
                printk("Store Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_LDG) {
                printk("Guarded Load Error Report\n");
                recoverable = 0;
        }

        if (reason & MCSR_TLBSYNC)
                printk("Simultaneous tlbsync operations\n");

        if (reason & MCSR_BSL2_ERR) {
                printk("Level 2 Cache Error\n");
                recoverable = 0;
        }

        if (reason & MCSR_MAV) {
                u64 addr;

                addr = mfspr(SPRN_MCAR);
                addr |= (u64)mfspr(SPRN_MCARU) << 32;

                printk("Machine Check %s Address: %#llx\n",
                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
        }

silent_out:
        mtspr(SPRN_MCSR, mcsr);
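        /*
         * MCSR bits are write-1-to-clear, so writing back the captured
         * value clears what we handled; only claim recovery if no new
         * bits have been raised in the meantime.
         */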
        return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        if (reason & MCSR_BUS_RBERR) {
                if (fsl_rio_mcheck_exception(regs))
                        return 1;
                if (fsl_pci_mcheck_exception(regs))
                        return 1;
        }

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_ICPERR)
                printk("Instruction Cache Parity Error\n");
        if (reason & MCSR_DCP_PERR)
                printk("Data Cache Push Parity Error\n");
        if (reason & MCSR_DCPERR)
                printk("Data Cache Parity Error\n");
        if (reason & MCSR_BUS_IAERR)
                printk("Bus - Instruction Address Error\n");
        if (reason & MCSR_BUS_RAERR)
                printk("Bus - Read Address Error\n");
        if (reason & MCSR_BUS_WAERR)
                printk("Bus - Write Address Error\n");
        if (reason & MCSR_BUS_IBERR)
                printk("Bus - Instruction Data Error\n");
        if (reason & MCSR_BUS_RBERR)
                printk("Bus - Read Data Bus Error\n");
        if (reason & MCSR_BUS_WBERR)
                printk("Bus - Write Data Bus Error\n");
        if (reason & MCSR_BUS_IPERR)
                printk("Bus - Instruction Parity Error\n");
        if (reason & MCSR_BUS_RPERR)
                printk("Bus - Read Parity Error\n");

        return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
        return 0;
}
#elif defined(CONFIG_E200)
int machine_check_e200(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from MCSR=%lx): ", reason);

        if (reason & MCSR_MCP)
                printk("Machine Check Signal\n");
        if (reason & MCSR_CP_PERR)
                printk("Cache Push Parity Error\n");
        if (reason & MCSR_CPERR)
                printk("Cache Parity Error\n");
        if (reason & MCSR_EXCP_ERR)
                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
        if (reason & MCSR_BUS_IRERR)
                printk("Bus - Read Bus Error on instruction fetch\n");
        if (reason & MCSR_BUS_DRERR)
                printk("Bus - Read Bus Error on data load\n");
        if (reason & MCSR_BUS_WRERR)
                printk("Bus - Write Bus Error on buffered store or cache line push\n");

        return 0;
}
#else
int machine_check_generic(struct pt_regs *regs)
{
        unsigned long reason = get_mc_reason(regs);

        printk("Machine check in kernel mode.\n");
        printk("Caused by (from SRR1=%lx): ", reason);
        switch (reason & 0x601F0000) {
        case 0x80000:
                printk("Machine check signal\n");
                break;
        case 0:         /* for 601 */
        case 0x40000:
        case 0x140000:  /* 7450 MSS error and TEA */
                printk("Transfer error ack signal\n");
                break;
        case 0x20000:
                printk("Data parity error signal\n");
                break;
        case 0x10000:
                printk("Address parity error signal\n");
                break;
        case 0x20000000:
                printk("L1 Data Cache error\n");
                break;
        case 0x40000000:
                printk("L1 Instruction Cache error\n");
                break;
        case 0x00100000:
                printk("L2 data cache parity error\n");
                break;
        default:
                printk("Unknown values in msr\n");
        }
        return 0;
}
#endif /* everything else */

void machine_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int recover = 0;

        __this_cpu_inc(irq_stat.mce_exceptions);

        /* See if there are any machine dependent calls. In theory, we would
         * want to call the CPU first, and call the ppc_md. one if the CPU
         * one returns a positive number. However there is existing code
         * that assumes the board gets a first chance, so let's keep it
         * that way for now and fix things later. --BenH.
         */
        if (ppc_md.machine_check_exception)
                recover = ppc_md.machine_check_exception(regs);
        else if (cur_cpu_spec->machine_check)
                recover = cur_cpu_spec->machine_check(regs);

        if (recover > 0)
                goto bail;

#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
        /* the qspan pci read routines can cause machine checks -- Cort
         *
         * yuck !!! that totally needs to go away ! There are better ways
         * to deal with that than having a wart in the mcheck handler.
         * -- BenH
         */
        bad_page_fault(regs, regs->dar, SIGBUS);
        goto bail;
#endif

        if (debugger_fault_handler(regs))
                goto bail;

        if (check_io_access(regs))
                goto bail;

        die("Machine check", regs, SIGBUS);

        /* Must die if the interrupt is not recoverable */
        if (!(regs->msr & MSR_RI))
                panic("Unrecoverable Machine check");

bail:
        exception_exit(prev_state);
}

void SMIException(struct pt_regs *regs)
{
        die("System Management Interrupt", regs, SIGABRT);
}

void handle_hmi_exception(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);
        irq_enter();

        if (ppc_md.handle_hmi_exception)
                ppc_md.handle_hmi_exception(regs);

        irq_exit();
        set_irq_regs(old_regs);
}

void unknown_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
               regs->nip, regs->msr, regs->trap);

        _exception(SIGTRAP, regs, 0, 0);

        exception_exit(prev_state);
}

void instruction_breakpoint_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_iabr_match(regs))
                goto bail;
        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);

bail:
        exception_exit(prev_state);
}

void RunModeException(struct pt_regs *regs)
{
        _exception(SIGTRAP, regs, 0, 0);
}

void __kprobes single_step_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        clear_single_step(regs);

        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                                        5, SIGTRAP) == NOTIFY_STOP)
                goto bail;
        if (debugger_sstep(regs))
                goto bail;

        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);

bail:
        exception_exit(prev_state);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception.  This was pointed out
 * by Kumar Gala.  -- paulus
 */
static void emulate_single_step(struct pt_regs *regs)
{
        if (single_stepping(regs))
                single_step_exception(regs);
}

static inline int __parse_fpscr(unsigned long fpscr)
{
        int ret = 0;

        /* Invalid operation */
        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
                ret = FPE_FLTINV;

        /* Overflow */
        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
                ret = FPE_FLTOVF;

        /* Underflow */
        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
                ret = FPE_FLTUND;

        /* Divide by zero */
        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
                ret = FPE_FLTDIV;

        /* Inexact result */
        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
                ret = FPE_FLTRES;

        return ret;
}

static void parse_fpe(struct pt_regs *regs)
{
        int code = 0;

        flush_fp_to_thread(current);

        code = __parse_fpscr(current->thread.fp_state.fpscr);

        _exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support.  Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault.  Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits.  In this case, matching lots of
 * bits is faster and easier.
 *
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 NB_RB = (instword >> 11) & 0x1f;
        u32 num_bytes;
        unsigned long EA;
        int pos = 0;

        /* Early out if we are an invalid form of lswx */
        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
                if ((rT == rA) || (rT == NB_RB))
                        return -EINVAL;

        EA = (rA == 0) ? 0 : regs->gpr[rA];

        switch (instword & PPC_INST_STRING_MASK) {
                case PPC_INST_LSWX:
                case PPC_INST_STSWX:
                        EA += NB_RB;
                        num_bytes = regs->xer & 0x7f;
                        break;
                case PPC_INST_LSWI:
                case PPC_INST_STSWI:
                        num_bytes = (NB_RB == 0) ? 32 : NB_RB;
                        break;
                default:
                        return -EINVAL;
        }

        while (num_bytes != 0) {
                u8 val;
                u32 shift = 8 * (3 - (pos & 0x3));
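                /* shift selects the byte lane: bytes fill each GPR from
                 * the most-significant byte of its low word downwards. */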

                /* if process is 32-bit, clear upper 32 bits of EA */
                if ((regs->msr & MSR_64BIT) == 0)
                        EA &= 0xFFFFFFFF;

                switch ((instword & PPC_INST_STRING_MASK)) {
                        case PPC_INST_LSWX:
                        case PPC_INST_LSWI:
                                if (get_user(val, (u8 __user *)EA))
                                        return -EFAULT;
                                /* first time updating this reg,
                                 * zero it out */
                                if (pos == 0)
                                        regs->gpr[rT] = 0;
                                regs->gpr[rT] |= val << shift;
                                break;
                        case PPC_INST_STSWI:
                        case PPC_INST_STSWX:
                                val = regs->gpr[rT] >> shift;
                                if (put_user(val, (u8 __user *)EA))
                                        return -EFAULT;
                                break;
                }
                /* move EA to next address */
                EA += 1;
                num_bytes--;

                /* manage our position within the register */
                if (++pos == 4) {
                        pos = 0;
                        if (++rT == 32)
                                rT = 0;
                }
        }

        return 0;
}

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
        u32 ra, rs;
        unsigned long tmp;

        ra = (instword >> 16) & 0x1f;
        rs = (instword >> 21) & 0x1f;

        tmp = regs->gpr[rs];
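        /*
         * Classic SWAR population count, kept per byte as popcntb
         * requires: fold to 2-bit sums, then 4-bit sums, then mask
         * out the count of set bits in each byte.
         */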
        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
        regs->gpr[ra] = tmp;

        return 0;
}

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
        u8 rT = (instword >> 21) & 0x1f;
        u8 rA = (instword >> 16) & 0x1f;
        u8 rB = (instword >> 11) & 0x1f;
        u8 BC = (instword >> 6) & 0x1f;
        u8 bit;
        unsigned long tmp;

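        /* isel: rT = CR bit BC set ? (rA ? GPR[rA] : 0) : GPR[rB] */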
        tmp = (rA == 0) ? 0 : regs->gpr[rA];
        bit = (regs->ccr >> (31 - BC)) & 0x1;

        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

        return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
        /* If we're emulating a load/store in an active transaction, we cannot
         * emulate it as the kernel operates in transaction suspended context.
         * We need to abort the transaction.  This creates a persistent TM
         * abort so tell the user what caused it with a new code.
         */
        if (MSR_TM_TRANSACTIONAL(regs->msr)) {
                tm_enable();
                tm_abort(cause);
                return true;
        }
        return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
        return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
        u32 instword;
        u32 rd;

        if (!user_mode(regs))
                return -EINVAL;
        CHECK_FULL_REGS(regs);

        if (get_user(instword, (u32 __user *)(regs->nip)))
                return -EFAULT;

        /* Emulate the mfspr rD, PVR. */
        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
                PPC_WARN_EMULATED(mfpvr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_PVR);
                return 0;
        }

        /* Emulating the dcba insn is just a no-op.  */
        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
                PPC_WARN_EMULATED(dcba, regs);
                return 0;
        }

        /* Emulate the mcrxr insn.  */
        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
                int shift = (instword >> 21) & 0x1c;
                unsigned long msk = 0xf0000000UL >> shift;
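                /* mcrxr copies XER[SO, OV, CA] into the CR field
                 * selected by the BF operand, then clears those XER
                 * bits. */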

                PPC_WARN_EMULATED(mcrxr, regs);
                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
                regs->xer &= ~0xf0000000UL;
                return 0;
        }

        /* Emulate load/store string insn. */
        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
                if (tm_abort_check(regs,
                                   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
                        return -EINVAL;
                PPC_WARN_EMULATED(string, regs);
                return emulate_string_inst(regs, instword);
        }

        /* Emulate the popcntb (Population Count Bytes) instruction. */
        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
                PPC_WARN_EMULATED(popcntb, regs);
                return emulate_popcntb_inst(regs, instword);
        }

        /* Emulate isel (Integer Select) instruction */
        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
                PPC_WARN_EMULATED(isel, regs);
                return emulate_isel(regs, instword);
        }

        /* Emulate sync instruction variants */
        if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
                PPC_WARN_EMULATED(sync, regs);
                asm volatile("sync");
                return 0;
        }

#ifdef CONFIG_PPC64
        /* Emulate the mfspr rD, DSCR. */
        if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
                PPC_INST_MFSPR_DSCR_USER) ||
             ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
                PPC_INST_MFSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mfdscr, regs);
                rd = (instword >> 21) & 0x1f;
                regs->gpr[rd] = mfspr(SPRN_DSCR);
                return 0;
        }
        /* Emulate the mtspr DSCR, rD. */
        if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
                PPC_INST_MTSPR_DSCR_USER) ||
             ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
                PPC_INST_MTSPR_DSCR)) &&
                        cpu_has_feature(CPU_FTR_DSCR)) {
                PPC_WARN_EMULATED(mtdscr, regs);
                rd = (instword >> 21) & 0x1f;
                current->thread.dscr = regs->gpr[rd];
                current->thread.dscr_inherit = 1;
                mtspr(SPRN_DSCR, current->thread.dscr);
                return 0;
        }
#endif

        return -EINVAL;
}

int is_valid_bugaddr(unsigned long addr)
{
        return is_kernel_addr(addr);
}

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
        int ret;
        extern int do_mathemu(struct pt_regs *regs);

        ret = do_mathemu(regs);
        if (ret >= 0)
                PPC_WARN_EMULATED(math, regs);

        switch (ret) {
        case 0:
                emulate_single_step(regs);
                return 0;
        case 1: {
                        int code = 0;
                        code = __parse_fpscr(current->thread.fp_state.fpscr);
                        _exception(SIGFPE, regs, code, regs->nip);
                        return 0;
                }
        case -EFAULT:
                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                return 0;
        }

        return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

void __kprobes program_check_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        unsigned int reason = get_reason(regs);

        /* We can now get here via a FP Unavailable exception if the core
         * has no FPU, in which case the reason flags will be 0 */

        if (reason & REASON_FP) {
                /* IEEE FP exception */
                parse_fpe(regs);
                goto bail;
        }
        if (reason & REASON_TRAP) {
                unsigned long bugaddr;
                /* Debugger is first in line to stop recursive faults in
                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
                if (debugger_bpt(regs))
                        goto bail;

                /* trap exception */
                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
                                == NOTIFY_STOP)
                        goto bail;

                bugaddr = regs->nip;
                /*
                 * Fixup bugaddr for BUG_ON() in real mode
                 */
                if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
                        bugaddr += PAGE_OFFSET;

                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
                    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
                goto bail;
        }
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (reason & REASON_TM) {
                /* This is a TM "Bad Thing Exception" program check.
                 * This occurs when:
                 * -  An rfid/hrfid/mtmsrd attempts to cause an illegal
                 *    transition in TM states.
                 * -  A trechkpt is attempted when transactional.
                 * -  A treclaim is attempted when non transactional.
                 * -  A tend is illegally attempted.
                 * -  writing a TM SPR when transactional.
                 */
                if (!user_mode(regs) &&
                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
                        regs->nip += 4;
                        goto bail;
                }
                /* If usermode caused this, it's done something illegal and
                 * gets a SIGILL slap on the wrist.  We call it an illegal
                 * operand to distinguish from the instruction just being bad
                 * (e.g. executing a 'tend' on a CPU without TM!); it's an
                 * illegal /placement/ of a valid instruction.
                 */
                if (user_mode(regs)) {
                        _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
                        goto bail;
                } else {
                        printk(KERN_EMERG "Unexpected TM Bad Thing exception "
                               "at %lx (msr 0x%x)\n", regs->nip, reason);
                        die("Unrecoverable exception", regs, SIGABRT);
                }
        }
#endif

        /*
         * If we took the program check in the kernel skip down to sending a
         * SIGILL. The subsequent cases all relate to emulating instructions
         * which we should only do for userspace. We also do not want to enable
         * interrupts for kernel faults because that might lead to further
         * faults, and lose the context of the original exception.
         */
        if (!user_mode(regs))
                goto sigill;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
         * but there seems to be a hardware bug on the 405GP (RevD)
         * that means ESR is sometimes set incorrectly - either to
         * ESR_DST (!?) or 0.  In the process of chasing this with the
         * hardware people - not sure if it can happen on any illegal
         * instruction or only on FP instructions, whether there is a
         * pattern to occurrences etc. -dgibson 31/Mar/2003
         */
        if (!emulate_math(regs))
                goto bail;

        /* Try to emulate it if we should. */
        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
                switch (emulate_instruction(regs)) {
                case 0:
                        regs->nip += 4;
                        emulate_single_step(regs);
                        goto bail;
                case -EFAULT:
                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
                        goto bail;
                }
        }

sigill:
        if (reason & REASON_PRIVILEGED)
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
        else
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);

bail:
        exception_exit(prev_state);
}

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
{
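        /* Present this to program_check_exception() as an illegal-
         * instruction program check by setting the SRR1 reason bit. */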
        regs->msr |= REASON_ILLEGAL;
        program_check_exception(regs);
}

void alignment_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();
        int sig, code, fixed = 0;

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
                goto bail;

        /* we don't implement logging of alignment exceptions */
        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
                fixed = fix_alignment(regs);

        if (fixed == 1) {
                regs->nip += 4; /* skip over emulated instruction */
                emulate_single_step(regs);
                goto bail;
        }

        /* Operand address was bad */
        if (fixed == -EFAULT) {
                sig = SIGSEGV;
                code = SEGV_ACCERR;
        } else {
                sig = SIGBUS;
                code = BUS_ADRALN;
        }
        if (user_mode(regs))
                _exception(sig, regs, code, regs->dar);
        else
                bad_page_fault(regs, regs->dar, sig);

bail:
        exception_exit(prev_state);
}

void StackOverflow(struct pt_regs *regs)
{
        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
               current, regs->gpr[1]);
        debugger(regs);
        show_regs(regs);
        panic("kernel stack overflow");
}

void nonrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
               regs->nip, regs->msr);
        debugger(regs);
        die("nonrecoverable exception", regs, SIGKILL);
}

void kernel_fp_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
                          "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);

        exception_exit(prev_state);
}

void altivec_unavailable_exception(struct pt_regs *regs)
{
        enum ctx_state prev_state = exception_enter();

        if (user_mode(regs)) {
                /* A user program has executed an altivec instruction,
                   but this kernel doesn't support altivec. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                goto bail;
        }

        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
                        "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);

bail:
        exception_exit(prev_state);
}

void vsx_unavailable_exception(struct pt_regs *regs)
{
        if (user_mode(regs)) {
                /* A user program has executed a vsx instruction,
                   but this kernel doesn't support vsx. */
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
                        "%lx at %lx\n", regs->trap, regs->nip);
        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
        static char *facility_strings[] = {
                [FSCR_FP_LG] = "FPU",
                [FSCR_VECVSX_LG] = "VMX/VSX",
                [FSCR_DSCR_LG] = "DSCR",
                [FSCR_PM_LG] = "PMU SPRs",
                [FSCR_BHRB_LG] = "BHRB",
                [FSCR_TM_LG] = "TM",
                [FSCR_EBB_LG] = "EBB",
                [FSCR_TAR_LG] = "TAR",
                [FSCR_LM_LG] = "LM",
        };
        char *facility = "unknown";
        u64 value;
        u32 instword, rd;
        u8 status;
        bool hv;

        hv = (regs->trap == 0xf80);
        if (hv)
                value = mfspr(SPRN_HFSCR);
        else
                value = mfspr(SPRN_FSCR);

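        /* The interruption cause is reported in the top byte of the (H)FSCR. */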
        status = value >> 56;
        if (status == FSCR_DSCR_LG) {
                /*
                 * User is accessing the DSCR register using the problem
                 * state only SPR number (0x03) either through a mfspr or
                 * a mtspr instruction. If it is a write attempt through
                 * a mtspr, then we set the inherit bit. This also allows
                 * the user to write or read the register directly in the
                 * future by setting via the FSCR DSCR bit. But in case it
                 * is a read DSCR attempt through a mfspr instruction, we
                 * just emulate the instruction instead. This code path will
                 * always emulate all the mfspr instructions till the user
                 * has attempted at least one mtspr instruction. This way it
                 * preserves the same behaviour when the user is accessing
                 * the DSCR through privilege level only SPR number (0x11)
                 * which is emulated through illegal instruction exception.
                 * We always leave HFSCR DSCR set.
                 */
                if (get_user(instword, (u32 __user *)(regs->nip))) {
                        pr_err("Failed to fetch the user instruction\n");
                        return;
                }

                /* Write into DSCR (mtspr 0x03, RS) */
                if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
                                == PPC_INST_MTSPR_DSCR_USER) {
                        rd = (instword >> 21) & 0x1f;
                        current->thread.dscr = regs->gpr[rd];
                        current->thread.dscr_inherit = 1;
                        current->thread.fscr |= FSCR_DSCR;
                        mtspr(SPRN_FSCR, current->thread.fscr);
                }

                /* Read from DSCR (mfspr RT, 0x03) */
                if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
                                == PPC_INST_MFSPR_DSCR_USER) {
                        if (emulate_instruction(regs)) {
                                pr_err("DSCR based mfspr emulation failed\n");
                                return;
                        }
                        regs->nip += 4;
                        emulate_single_step(regs);
                }
                return;
        } else if ((status == FSCR_LM_LG) && cpu_has_feature(CPU_FTR_ARCH_300)) {
                /*
                 * This process has touched LM, so turn it on forever
                 * for this process
                 */
                current->thread.fscr |= FSCR_LM;
                mtspr(SPRN_FSCR, current->thread.fscr);
                return;
        }

        if ((status < ARRAY_SIZE(facility_strings)) &&
            facility_strings[status])
                facility = facility_strings[status];

        /* We restore the interrupt state now */
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();

        pr_err_ratelimited(
                "%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
                hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);

        if (user_mode(regs)) {
                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
                return;
        }

        die("Unexpected facility unavailable exception", regs, SIGABRT);
}
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

void fp_unavailable_tm(struct pt_regs *regs)
{
        /* Note:  This does not handle any kind of FP laziness. */

        TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
                 regs->nip, regs->msr);

        /* We can only have got here if the task started using FP after
         * beginning the transaction.  So, the transactional regs are just a
         * copy of the checkpointed ones.  But, we still need to recheckpoint
         * as we're enabling FP for the process; it will return, abort the
         * transaction, and probably retry but now with FP enabled.  So the
         * checkpointed FP registers need to be loaded.
         */
        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
        /* Reclaim didn't save out any FPRs to transact_fprs. */

        /* Enable FP for the task: */
        regs->msr |= (MSR_FP | current->thread.fpexc_mode);

        /* This loads and recheckpoints the FP registers from
         * thread.fpr[].  They will remain in registers after the
         * checkpoint so we don't need to reload them after.
         * If VMX is in use, the VRs now hold checkpointed values,
         * so we don't want to load the VRs from the thread_struct.
         */
        tm_recheckpoint(&current->thread, MSR_FP);

        /* If VMX is in use, get the transactional values back */
        if (regs->msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                /* At this point all the VSX state is loaded, so enable it */
                regs->msr |= MSR_VSX;
        }
}

void altivec_unavailable_tm(struct pt_regs *regs)
{
        /* See the comments in fp_unavailable_tm().  This function operates
         * the same way.
         */

        TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
                 regs->nip, regs->msr);
1522        tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1523        regs->msr |= MSR_VEC;
1524        tm_recheckpoint(&current->thread, MSR_VEC);
1525        current->thread.used_vr = 1;
1526
1527        if (regs->msr & MSR_FP) {
1528                do_load_up_transact_fpu(&current->thread);
1529                regs->msr |= MSR_VSX;
1530        }
1531}
1532
void vsx_unavailable_tm(struct pt_regs *regs)
{
        unsigned long orig_msr = regs->msr;

        /* See the comments in fp_unavailable_tm().  This works similarly,
         * though we're loading both FP and VEC registers in here.
         *
         * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
         * regs.  Either way, set MSR_VSX.
         */

        TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, "
                 "MSR=%lx\n",
                 regs->nip, regs->msr);

        current->thread.used_vsr = 1;

        /* If FP and VMX are already loaded, we have all the state we need */
        if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
                regs->msr |= MSR_VSX;
                return;
        }

        /* This reclaims FP and/or VR regs if they're already enabled */
        tm_reclaim_current(TM_CAUSE_FAC_UNAV);

        regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
                MSR_VSX;

        /* This loads & recheckpoints FP and VRs; but we have
         * to be sure not to overwrite previously-valid state.
         */
        tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);

        if (orig_msr & MSR_FP)
                do_load_up_transact_fpu(&current->thread);
        if (orig_msr & MSR_VEC)
                do_load_up_transact_altivec(&current->thread);
}
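
/*
 * Worked example for the recheckpoint mask above: suppose the thread
 * entered with only FP loaded, i.e. orig_msr contains MSR_FP but not
 * MSR_VEC.  After the reclaim, regs->msr holds FP|VEC|VSX, so (ignoring
 * the fpexc_mode bits)
 *
 *      regs->msr & ~orig_msr  ==  MSR_VEC | MSR_VSX
 *
 * and tm_recheckpoint() loads only the checkpointed vector state from
 * the thread_struct, while the do_load_up_transact_fpu() call above
 * restores the live transactional FP values the reclaim just saved out.
 */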
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

void performance_monitor_exception(struct pt_regs *regs)
{
        __this_cpu_inc(irq_stat.pmu_irqs);

        perf_irq(regs);
}

#ifdef CONFIG_8xx
void SoftwareEmulation(struct pt_regs *regs)
{
        CHECK_FULL_REGS(regs);

        if (!user_mode(regs)) {
                debugger(regs);
                die("Kernel Mode Unimplemented Instruction or SW FPU Emulation",
                        regs, SIGFPE);
        }

        if (!emulate_math(regs))
                return;

        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif /* CONFIG_8xx */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
        int changed = 0;
        /*
         * Determine the cause of the debug event, clear the
         * event flags and send a trap to the handler.
         */
        if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
                dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
                current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
                do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
                             5);
                changed |= 0x01;
        } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
                dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
                do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
                             6);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC1) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
                dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
                do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
                             1);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC2) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
                do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
                             2);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC3) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
                dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
                do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
                             3);
                changed |= 0x01;
        } else if (debug_status & DBSR_IAC4) {
                current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
                do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
                             4);
                changed |= 0x01;
        }
        /*
         * At the point this routine was called, the MSR(DE) was turned off.
         * Check all other debug flags and see if that bit needs to be turned
         * back on or not.
         */
        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
                               current->thread.debug.dbcr1))
                regs->msr |= MSR_DE;
        else
                /* Make sure the IDM flag is off */
                current->thread.debug.dbcr0 &= ~DBCR0_IDM;

        if (changed & 0x01)
                mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
        current->thread.debug.dbsr = debug_status;

        /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
         * on server, it stops on the target of the branch. In order to simulate
         * the server behaviour, we thus restart right away with a single step
         * instead of stopping here when hitting a BT.
         */
        if (debug_status & DBSR_BT) {
                regs->msr &= ~MSR_DE;

                /* Disable BT */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
                /* Clear the BT event */
                mtspr(SPRN_DBSR, DBSR_BT);

                /* Do the single step trick only when coming from userspace */
                if (user_mode(regs)) {
                        current->thread.debug.dbcr0 &= ~DBCR0_BT;
                        current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
                        regs->msr |= MSR_DE;
                        return;
                }

                if (notify_die(DIE_SSTEP, "block_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }
                if (debugger_sstep(regs))
                        return;
        } else if (debug_status & DBSR_IC) {    /* Instruction complete */
                regs->msr &= ~MSR_DE;

                /* Disable instruction completion */
                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
                /* Clear the instruction completion event */
                mtspr(SPRN_DBSR, DBSR_IC);

                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
                               5, SIGTRAP) == NOTIFY_STOP) {
                        return;
                }

                if (debugger_sstep(regs))
                        return;

                if (user_mode(regs)) {
                        current->thread.debug.dbcr0 &= ~DBCR0_IC;
                        if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
                                               current->thread.debug.dbcr1))
                                regs->msr |= MSR_DE;
                        else
                                /* Make sure the IDM bit is off */
                                current->thread.debug.dbcr0 &= ~DBCR0_IDM;
                }

                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
        } else {
                handle_debug(regs, debug_status);
        }
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

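/*
 * Illustrative sketch (standalone user space, not kernel code): on these
 * cores the DBSR_IC branch above is what backs PTRACE_SINGLESTEP, with
 * the SIGTRAP/TRAP_TRACE from _exception() stopping the tracee.  A
 * minimal tracer, error handling omitted:
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int main(void)
{
        pid_t child = fork();

        if (child == 0) {
                ptrace(PTRACE_TRACEME, 0, NULL, NULL);
                raise(SIGSTOP);
                /* ... code to be single-stepped ... */
                _exit(0);
        }
        waitpid(child, NULL, 0);
        /* Each step ends in the SIGTRAP raised by DebugException() */
        ptrace(PTRACE_SINGLESTEP, child, NULL, NULL);
        waitpid(child, NULL, 0);
        ptrace(PTRACE_CONT, child, NULL, NULL);
        return 0;
}
#endif /* illustrative sketch */
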
#if !defined(CONFIG_TAU_INT)
void TAUException(struct pt_regs *regs)
{
        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
               regs->nip, regs->msr, regs->trap, print_tainted());
}
#endif /* CONFIG_TAU_INT */

#ifdef CONFIG_ALTIVEC
void altivec_assist_exception(struct pt_regs *regs)
{
        int err;

        if (!user_mode(regs)) {
                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
                       " at %lx\n", regs->nip);
                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
        }

        flush_altivec_to_thread(current);

        PPC_WARN_EMULATED(altivec, regs);
        err = emulate_altivec(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else {
                /* didn't recognize the instruction */
                /* XXX quick hack for now: set the non-Java bit in the VSCR */
                printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
                                   "in %s at %lx\n", current->comm, regs->nip);
                current->thread.vr_state.vscr.u[3] |= 0x10000;
        }
}
#endif /* CONFIG_ALTIVEC */

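/*
 * Illustrative sketch (standalone user space, not kernel code): the assist
 * above is typically raised for denormal operands while VSCR[NJ] is clear
 * (Java mode).  Assuming GCC's AltiVec intrinsics (compile with -maltivec),
 * an operation on denormals that may take this path; whether the assist
 * actually fires depends on the core and the current VSCR[NJ] setting:
 */
#if 0
#include <altivec.h>

float provoke_assist(void)    /* hypothetical helper */
{
        /* bit pattern 0x00000001 is the smallest positive denormal float */
        vector unsigned int bits = { 1, 1, 1, 1 };
        vector float denorm = (vector float) bits;
        vector float sum = vec_add(denorm, denorm);

        return ((float *) &sum)[0];
}
#endif /* illustrative sketch */
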
#ifdef CONFIG_FSL_BOOKE
void CacheLockingException(struct pt_regs *regs, unsigned long address,
                           unsigned long error_code)
{
        /* We treat cache locking instructions from the user
         * as priv ops, in the future we could try to do
         * something smarter
         */
        if (error_code & (ESR_DLK|ESR_ILK))
                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_FSL_BOOKE */

#ifdef CONFIG_SPE
void SPEFloatingPointException(struct pt_regs *regs)
{
        extern int do_spe_mathemu(struct pt_regs *regs);
        unsigned long spefscr;
        int fpexc_mode;
        int code = 0;
        int err;

        flush_spe_to_thread(current);

        spefscr = current->thread.spefscr;
        fpexc_mode = current->thread.fpexc_mode;

        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
                code = FPE_FLTOVF;
        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
                code = FPE_FLTUND;
        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
                code = FPE_FLTDIV;
        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
                code = FPE_FLTINV;
        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
                code = FPE_FLTRES;

        err = do_spe_mathemu(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, code, regs->nip);
        }
}

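/*
 * Illustrative sketch (standalone user space, not kernel code): the
 * fpexc_mode tested above is the per-process trap mask configured with
 * prctl(PR_SET_FPEXC).  For instance, to request SIGFPE on divide-by-zero
 * and invalid operations only:
 */
#if 0
#include <sys/prctl.h>

int enable_spe_traps(void)    /* hypothetical helper */
{
        /* With these bits set, the SPEFSCR_FDBZ/FINV conditions above map
         * to FPE_FLTDIV/FPE_FLTINV; everything else stays non-signalling. */
        return prctl(PR_SET_FPEXC, PR_FP_EXC_DIV | PR_FP_EXC_INV);
}
#endif /* illustrative sketch */
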
void SPEFloatingPointRoundException(struct pt_regs *regs)
{
        extern int speround_handler(struct pt_regs *regs);
        int err;

        preempt_disable();
        if (regs->msr & MSR_SPE)
                giveup_spe(current);
        preempt_enable();

        regs->nip -= 4;
        err = speround_handler(regs);
        if (err == 0) {
                regs->nip += 4;         /* skip emulated instruction */
                emulate_single_step(regs);
                return;
        }

        if (err == -EFAULT) {
                /* got an error reading the instruction */
                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
        } else if (err == -EINVAL) {
                /* didn't recognize the instruction */
                printk(KERN_ERR "unrecognized spe instruction "
                       "in %s at %lx\n", current->comm, regs->nip);
        } else {
                _exception(SIGFPE, regs, 0, regs->nip);
        }
}
#endif /* CONFIG_SPE */

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 still held live state
 * that had not yet been saved, so by taking this exception we have
 * clobbered them and lost that state.
 */
void unrecoverable_exception(struct pt_regs *regs)
{
        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
               regs->trap, regs->nip);
        die("Unrecoverable exception", regs, SIGABRT);
}

#if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
/*
 * Default handler for a Watchdog exception: it simply masks further
 * watchdog interrupts and returns, leaving any final-stage reset to
 * the hardware.
 */
void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
{
        /* Generic WatchdogHandler, implement your own */
        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
}

void WatchdogException(struct pt_regs *regs)
{
        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
        WatchdogHandler(regs);
}
#endif

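/*
 * Illustrative sketch: because WatchdogHandler() above is declared weak,
 * a platform file can override it simply by providing a non-weak
 * definition.  A hypothetical board-specific handler might look like:
 */
#if 0
void WatchdogHandler(struct pt_regs *regs)
{
        /* board-specific action, e.g. log state and force a clean reset */
        pr_emerg("watchdog: resetting board\n");
        machine_restart(NULL);
}
#endif /* illustrative sketch */
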
/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
void kernel_bad_stack(struct pt_regs *regs)
{
        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
               regs->gpr[1], regs->nip);
        die("Bad kernel stack pointer", regs, SIGABRT);
}

void __init trap_init(void)
{
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)       .type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
        WARN_EMULATED_SETUP(altivec),
#endif
        WARN_EMULATED_SETUP(dcba),
        WARN_EMULATED_SETUP(dcbz),
        WARN_EMULATED_SETUP(fp_pair),
        WARN_EMULATED_SETUP(isel),
        WARN_EMULATED_SETUP(mcrxr),
        WARN_EMULATED_SETUP(mfpvr),
        WARN_EMULATED_SETUP(multiple),
        WARN_EMULATED_SETUP(popcntb),
        WARN_EMULATED_SETUP(spe),
        WARN_EMULATED_SETUP(string),
        WARN_EMULATED_SETUP(sync),
        WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
        WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
        WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
        WARN_EMULATED_SETUP(mfdscr),
        WARN_EMULATED_SETUP(mtdscr),
        WARN_EMULATED_SETUP(lq_stq),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
        pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
                            type);
}

static int __init ppc_warn_emulated_init(void)
{
        struct dentry *dir, *d;
        unsigned int i;
        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

        if (!powerpc_debugfs_root)
                return -ENODEV;

        dir = debugfs_create_dir("emulated_instructions",
                                 powerpc_debugfs_root);
        if (!dir)
                return -ENOMEM;

        d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
                               &ppc_warn_emulated);
        if (!d)
                goto fail;

        for (i = 0; i < sizeof(ppc_emulated) / sizeof(*entries); i++) {
                d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
                                       (u32 *)&entries[i].val.counter);
                if (!d)
                        goto fail;
        }

        return 0;

fail:
        debugfs_remove_recursive(dir);
        return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */

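/*
 * Illustrative sketch (standalone user space, not kernel code): the
 * counters registered above appear under debugfs, typically at
 * /sys/kernel/debug/powerpc/emulated_instructions/.  Reading one of them,
 * assuming that mount point:
 */
#if 0
#include <stdio.h>

int main(void)
{
        char buf[32];
        FILE *f = fopen("/sys/kernel/debug/powerpc/"
                        "emulated_instructions/do_warn", "r");

        if (f && fgets(buf, sizeof(buf), f))
                printf("do_warn = %s", buf);
        if (f)
                fclose(f);
        return 0;
}
#endif /* illustrative sketch */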