linux/arch/powerpc/kernel/traps.c
   1/*
   2 *  Copyright (C) 1995-1996  Gary Thomas (gdt@linuxppc.org)
   3 *  Copyright 2007-2010 Freescale Semiconductor, Inc.
   4 *
   5 *  This program is free software; you can redistribute it and/or
   6 *  modify it under the terms of the GNU General Public License
   7 *  as published by the Free Software Foundation; either version
   8 *  2 of the License, or (at your option) any later version.
   9 *
  10 *  Modified by Cort Dougan (cort@cs.nmt.edu)
  11 *  and Paul Mackerras (paulus@samba.org)
  12 */
  13
  14/*
  15 * This file handles the architecture-dependent parts of hardware exceptions
  16 */
  17
  18#include <linux/errno.h>
  19#include <linux/sched.h>
  20#include <linux/kernel.h>
  21#include <linux/mm.h>
  22#include <linux/stddef.h>
  23#include <linux/unistd.h>
  24#include <linux/ptrace.h>
  25#include <linux/user.h>
  26#include <linux/interrupt.h>
  27#include <linux/init.h>
  28#include <linux/module.h>
  29#include <linux/prctl.h>
  30#include <linux/delay.h>
  31#include <linux/kprobes.h>
  32#include <linux/kexec.h>
  33#include <linux/backlight.h>
  34#include <linux/bug.h>
  35#include <linux/kdebug.h>
  36#include <linux/debugfs.h>
  37
  38#include <asm/emulated_ops.h>
  39#include <asm/pgtable.h>
  40#include <asm/uaccess.h>
  41#include <asm/system.h>
  42#include <asm/io.h>
  43#include <asm/machdep.h>
  44#include <asm/rtas.h>
  45#include <asm/pmc.h>
  46#ifdef CONFIG_PPC32
  47#include <asm/reg.h>
  48#endif
  49#ifdef CONFIG_PMAC_BACKLIGHT
  50#include <asm/backlight.h>
  51#endif
  52#ifdef CONFIG_PPC64
  53#include <asm/firmware.h>
  54#include <asm/processor.h>
  55#endif
  56#include <asm/kexec.h>
  57#include <asm/ppc-opcode.h>
  58
  59#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
  60int (*__debugger)(struct pt_regs *regs) __read_mostly;
  61int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
  62int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
  63int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
  64int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
  65int (*__debugger_dabr_match)(struct pt_regs *regs) __read_mostly;
  66int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
  67
  68EXPORT_SYMBOL(__debugger);
  69EXPORT_SYMBOL(__debugger_ipi);
  70EXPORT_SYMBOL(__debugger_bpt);
  71EXPORT_SYMBOL(__debugger_sstep);
  72EXPORT_SYMBOL(__debugger_iabr_match);
  73EXPORT_SYMBOL(__debugger_dabr_match);
  74EXPORT_SYMBOL(__debugger_fault_handler);
  75#endif
  76
  77/*
  78 * Trap & Exception support
  79 */
  80
  81#ifdef CONFIG_PMAC_BACKLIGHT
  82static void pmac_backlight_unblank(void)
  83{
  84        mutex_lock(&pmac_backlight_mutex);
  85        if (pmac_backlight) {
  86                struct backlight_properties *props;
  87
  88                props = &pmac_backlight->props;
  89                props->brightness = props->max_brightness;
  90                props->power = FB_BLANK_UNBLANK;
  91                backlight_update_status(pmac_backlight);
  92        }
  93        mutex_unlock(&pmac_backlight_mutex);
  94}
  95#else
  96static inline void pmac_backlight_unblank(void) { }
  97#endif
  98
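     /*
      * Serialize oops output: the lock_owner/lock_owner_depth fields let
      * the same CPU re-enter die() (a recursive oops while printing)
      * without deadlocking, while other CPUs wait on the lock.  Beyond
      * two nested entries the report is suppressed to avoid endless
      * output.
      */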
  99int die(const char *str, struct pt_regs *regs, long err)
 100{
 101        static struct {
 102                raw_spinlock_t lock;
 103                u32 lock_owner;
 104                int lock_owner_depth;
 105        } die = {
 106                .lock =                 __RAW_SPIN_LOCK_UNLOCKED(die.lock),
 107                .lock_owner =           -1,
 108                .lock_owner_depth =     0
 109        };
 110        static int die_counter;
 111        unsigned long flags;
 112
 113        if (debugger(regs))
 114                return 1;
 115
 116        oops_enter();
 117
 118        if (die.lock_owner != raw_smp_processor_id()) {
 119                console_verbose();
 120                raw_spin_lock_irqsave(&die.lock, flags);
 121                die.lock_owner = smp_processor_id();
 122                die.lock_owner_depth = 0;
 123                bust_spinlocks(1);
 124                if (machine_is(powermac))
 125                        pmac_backlight_unblank();
 126        } else {
 127                local_save_flags(flags);
 128        }
 129
 130        if (++die.lock_owner_depth < 3) {
 131                printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
 132#ifdef CONFIG_PREEMPT
 133                printk("PREEMPT ");
 134#endif
 135#ifdef CONFIG_SMP
 136                printk("SMP NR_CPUS=%d ", NR_CPUS);
 137#endif
 138#ifdef CONFIG_DEBUG_PAGEALLOC
 139                printk("DEBUG_PAGEALLOC ");
 140#endif
 141#ifdef CONFIG_NUMA
 142                printk("NUMA ");
 143#endif
 144                printk("%s\n", ppc_md.name ? ppc_md.name : "");
 145
 146                sysfs_printk_last_file();
 147                if (notify_die(DIE_OOPS, str, regs, err, 255,
 148                               SIGSEGV) == NOTIFY_STOP)
 149                        return 1;
 150
 151                print_modules();
 152                show_regs(regs);
 153        } else {
 154                printk("Recursive die() failure, output suppressed\n");
 155        }
 156
 157        bust_spinlocks(0);
 158        die.lock_owner = -1;
 159        add_taint(TAINT_DIE);
 160        raw_spin_unlock_irqrestore(&die.lock, flags);
 161
 162        if (kexec_should_crash(current) ||
 163                kexec_sr_activated(smp_processor_id()))
 164                crash_kexec(regs);
 165        crash_kexec_secondary(regs);
 166
 167        if (in_interrupt())
 168                panic("Fatal exception in interrupt");
 169
 170        if (panic_on_oops)
 171                panic("Fatal exception");
 172
 173        oops_exit();
 174        do_exit(err);
 175
 176        return 0;
 177}
 178
 179void user_single_step_siginfo(struct task_struct *tsk,
 180                                struct pt_regs *regs, siginfo_t *info)
 181{
 182        memset(info, 0, sizeof(*info));
 183        info->si_signo = SIGTRAP;
 184        info->si_code = TRAP_TRACE;
 185        info->si_addr = (void __user *)regs->nip;
 186}
 187
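     /*
      * Deliver a signal for an exception.  In kernel mode this is fatal
      * (die()); in user mode we optionally log a rate-limited line for
      * unhandled signals and then force the signal on the current task.
      */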
 188void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 189{
 190        siginfo_t info;
 191        const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 192                        "at %08lx nip %08lx lr %08lx code %x\n";
 193        const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
 194                        "at %016lx nip %016lx lr %016lx code %x\n";
 195
 196        if (!user_mode(regs)) {
 197                if (die("Exception in kernel mode", regs, signr))
 198                        return;
 199        } else if (show_unhandled_signals &&
 200                    unhandled_signal(current, signr) &&
 201                    printk_ratelimit()) {
 202                        printk(regs->msr & MSR_SF ? fmt64 : fmt32,
 203                                current->comm, current->pid, signr,
 204                                addr, regs->nip, regs->link, code);
 205                }
 206
 207        memset(&info, 0, sizeof(info));
 208        info.si_signo = signr;
 209        info.si_code = code;
 210        info.si_addr = (void __user *) addr;
 211        force_sig_info(signr, &info, current);
 212}
 213
 214#ifdef CONFIG_PPC64
 215void system_reset_exception(struct pt_regs *regs)
 216{
  217        /* See if any machine dependent calls can handle this */
 218        if (ppc_md.system_reset_exception) {
 219                if (ppc_md.system_reset_exception(regs))
 220                        return;
 221        }
 222
 223#ifdef CONFIG_KEXEC
 224        cpu_set(smp_processor_id(), cpus_in_sr);
 225#endif
 226
 227        die("System Reset", regs, SIGABRT);
 228
 229        /*
 230         * Some CPUs when released from the debugger will execute this path.
 231         * These CPUs entered the debugger via a soft-reset. If the CPU was
 232         * hung before entering the debugger it will return to the hung
 233         * state when exiting this function.  This causes a problem in
 234         * kdump since the hung CPU(s) will not respond to the IPI sent
 235         * from kdump. To prevent the problem we call crash_kexec_secondary()
 236         * here. If a kdump had not been initiated or we exit the debugger
 237         * with the "exit and recover" command (x) crash_kexec_secondary()
 238         * will return after 5ms and the CPU returns to its previous state.
 239         */
 240        crash_kexec_secondary(regs);
 241
 242        /* Must die if the interrupt is not recoverable */
 243        if (!(regs->msr & MSR_RI))
 244                panic("Unrecoverable System Reset");
 245
 246        /* What should we do here? We could issue a shutdown or hard reset. */
 247}
 248#endif
 249
 250/*
 251 * I/O accesses can cause machine checks on powermacs.
 252 * Check if the NIP corresponds to the address of a sync
 253 * instruction for which there is an entry in the exception
 254 * table.
 255 * Note that the 601 only takes a machine check on TEA
 256 * (transfer error ack) signal assertion, and does not
 257 * set any of the top 16 bits of SRR1.
 258 *  -- paulus.
 259 */
 260static inline int check_io_access(struct pt_regs *regs)
 261{
 262#ifdef CONFIG_PPC32
 263        unsigned long msr = regs->msr;
 264        const struct exception_table_entry *entry;
 265        unsigned int *nip = (unsigned int *)regs->nip;
 266
 267        if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
 268            && (entry = search_exception_tables(regs->nip)) != NULL) {
 269                /*
 270                 * Check that it's a sync instruction, or somewhere
 271                 * in the twi; isync; nop sequence that inb/inw/inl uses.
 272                 * As the address is in the exception table
 273                 * we should be able to read the instr there.
 274                 * For the debug message, we look at the preceding
 275                 * load or store.
 276                 */
 277                if (*nip == 0x60000000)         /* nop */
 278                        nip -= 2;
 279                else if (*nip == 0x4c00012c)    /* isync */
 280                        --nip;
 281                if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
 282                        /* sync or twi */
 283                        unsigned int rb;
 284
 285                        --nip;
 286                        rb = (*nip >> 11) & 0x1f;
 287                        printk(KERN_DEBUG "%s bad port %lx at %p\n",
  288                               (*nip & 0x100) ? "OUT to" : "IN from",
 289                               regs->gpr[rb] - _IO_BASE, nip);
 290                        regs->msr |= MSR_RI;
 291                        regs->nip = entry->fixup;
 292                        return 1;
 293                }
 294        }
 295#endif /* CONFIG_PPC32 */
 296        return 0;
 297}
 298
 299#ifdef CONFIG_PPC_ADV_DEBUG_REGS
  300/* On 4xx and Book-E, the reason for the machine check or program
  301   exception is in the ESR. */
 302#define get_reason(regs)        ((regs)->dsisr)
 303#ifndef CONFIG_FSL_BOOKE
 304#define get_mc_reason(regs)     ((regs)->dsisr)
 305#else
 306#define get_mc_reason(regs)     (mfspr(SPRN_MCSR))
 307#endif
 308#define REASON_FP               ESR_FP
 309#define REASON_ILLEGAL          (ESR_PIL | ESR_PUO)
 310#define REASON_PRIVILEGED       ESR_PPR
 311#define REASON_TRAP             ESR_PTR
 312
 313/* single-step stuff */
 314#define single_stepping(regs)   (current->thread.dbcr0 & DBCR0_IC)
 315#define clear_single_step(regs) (current->thread.dbcr0 &= ~DBCR0_IC)
 316
 317#else
 318/* On non-4xx, the reason for the machine check or program
 319   exception is in the MSR. */
 320#define get_reason(regs)        ((regs)->msr)
 321#define get_mc_reason(regs)     ((regs)->msr)
 322#define REASON_FP               0x100000
 323#define REASON_ILLEGAL          0x80000
 324#define REASON_PRIVILEGED       0x40000
 325#define REASON_TRAP             0x20000
 326
 327#define single_stepping(regs)   ((regs)->msr & MSR_SE)
 328#define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
 329#endif
 330
 331#if defined(CONFIG_4xx)
 332int machine_check_4xx(struct pt_regs *regs)
 333{
 334        unsigned long reason = get_mc_reason(regs);
 335
 336        if (reason & ESR_IMCP) {
 337                printk("Instruction");
 338                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 339        } else
 340                printk("Data");
 341        printk(" machine check in kernel mode.\n");
 342
 343        return 0;
 344}
 345
 346int machine_check_440A(struct pt_regs *regs)
 347{
 348        unsigned long reason = get_mc_reason(regs);
 349
 350        printk("Machine check in kernel mode.\n");
  351        if (reason & ESR_IMCP) {
 352                printk("Instruction Synchronous Machine Check exception\n");
 353                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 354        }
 355        else {
 356                u32 mcsr = mfspr(SPRN_MCSR);
 357                if (mcsr & MCSR_IB)
 358                        printk("Instruction Read PLB Error\n");
 359                if (mcsr & MCSR_DRB)
 360                        printk("Data Read PLB Error\n");
 361                if (mcsr & MCSR_DWB)
 362                        printk("Data Write PLB Error\n");
 363                if (mcsr & MCSR_TLBP)
 364                        printk("TLB Parity Error\n");
  365                if (mcsr & MCSR_ICP) {
 366                        flush_instruction_cache();
 367                        printk("I-Cache Parity Error\n");
 368                }
 369                if (mcsr & MCSR_DCSP)
 370                        printk("D-Cache Search Parity Error\n");
 371                if (mcsr & MCSR_DCFP)
 372                        printk("D-Cache Flush Parity Error\n");
 373                if (mcsr & MCSR_IMPE)
 374                        printk("Machine Check exception is imprecise\n");
 375
 376                /* Clear MCSR */
 377                mtspr(SPRN_MCSR, mcsr);
 378        }
 379        return 0;
 380}
 381
 382int machine_check_47x(struct pt_regs *regs)
 383{
 384        unsigned long reason = get_mc_reason(regs);
 385        u32 mcsr;
 386
 387        printk(KERN_ERR "Machine check in kernel mode.\n");
 388        if (reason & ESR_IMCP) {
 389                printk(KERN_ERR
 390                       "Instruction Synchronous Machine Check exception\n");
 391                mtspr(SPRN_ESR, reason & ~ESR_IMCP);
 392                return 0;
 393        }
 394        mcsr = mfspr(SPRN_MCSR);
 395        if (mcsr & MCSR_IB)
 396                printk(KERN_ERR "Instruction Read PLB Error\n");
 397        if (mcsr & MCSR_DRB)
 398                printk(KERN_ERR "Data Read PLB Error\n");
 399        if (mcsr & MCSR_DWB)
 400                printk(KERN_ERR "Data Write PLB Error\n");
 401        if (mcsr & MCSR_TLBP)
 402                printk(KERN_ERR "TLB Parity Error\n");
 403        if (mcsr & MCSR_ICP) {
 404                flush_instruction_cache();
 405                printk(KERN_ERR "I-Cache Parity Error\n");
 406        }
 407        if (mcsr & MCSR_DCSP)
 408                printk(KERN_ERR "D-Cache Search Parity Error\n");
 409        if (mcsr & PPC47x_MCSR_GPR)
 410                printk(KERN_ERR "GPR Parity Error\n");
 411        if (mcsr & PPC47x_MCSR_FPR)
 412                printk(KERN_ERR "FPR Parity Error\n");
 413        if (mcsr & PPC47x_MCSR_IPR)
 414                printk(KERN_ERR "Machine Check exception is imprecise\n");
 415
 416        /* Clear MCSR */
 417        mtspr(SPRN_MCSR, mcsr);
 418
 419        return 0;
 420}
 421#elif defined(CONFIG_E500)
 422int machine_check_e500mc(struct pt_regs *regs)
 423{
 424        unsigned long mcsr = mfspr(SPRN_MCSR);
 425        unsigned long reason = mcsr;
 426        int recoverable = 1;
 427
 428        printk("Machine check in kernel mode.\n");
 429        printk("Caused by (from MCSR=%lx): ", reason);
 430
 431        if (reason & MCSR_MCP)
 432                printk("Machine Check Signal\n");
 433
 434        if (reason & MCSR_ICPERR) {
 435                printk("Instruction Cache Parity Error\n");
 436
 437                /*
 438                 * This is recoverable by invalidating the i-cache.
 439                 */
 440                mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
 441                while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
 442                        ;
 443
 444                /*
 445                 * This will generally be accompanied by an instruction
 446                 * fetch error report -- only treat MCSR_IF as fatal
 447                 * if it wasn't due to an L1 parity error.
 448                 */
 449                reason &= ~MCSR_IF;
 450        }
 451
 452        if (reason & MCSR_DCPERR_MC) {
 453                printk("Data Cache Parity Error\n");
 454                recoverable = 0;
 455        }
 456
 457        if (reason & MCSR_L2MMU_MHIT) {
 458                printk("Hit on multiple TLB entries\n");
 459                recoverable = 0;
 460        }
 461
 462        if (reason & MCSR_NMI)
 463                printk("Non-maskable interrupt\n");
 464
 465        if (reason & MCSR_IF) {
 466                printk("Instruction Fetch Error Report\n");
 467                recoverable = 0;
 468        }
 469
 470        if (reason & MCSR_LD) {
 471                printk("Load Error Report\n");
 472                recoverable = 0;
 473        }
 474
 475        if (reason & MCSR_ST) {
 476                printk("Store Error Report\n");
 477                recoverable = 0;
 478        }
 479
 480        if (reason & MCSR_LDG) {
 481                printk("Guarded Load Error Report\n");
 482                recoverable = 0;
 483        }
 484
 485        if (reason & MCSR_TLBSYNC)
 486                printk("Simultaneous tlbsync operations\n");
 487
 488        if (reason & MCSR_BSL2_ERR) {
 489                printk("Level 2 Cache Error\n");
 490                recoverable = 0;
 491        }
 492
 493        if (reason & MCSR_MAV) {
 494                u64 addr;
 495
 496                addr = mfspr(SPRN_MCAR);
 497                addr |= (u64)mfspr(SPRN_MCARU) << 32;
 498
 499                printk("Machine Check %s Address: %#llx\n",
 500                       reason & MCSR_MEA ? "Effective" : "Physical", addr);
 501        }
 502
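             /*
              * MCSR bits are write-one-to-clear, so writing back the value
              * we read acknowledges those errors.  If MCSR is still
              * non-zero afterwards, another machine check arrived in the
              * meantime, so only claim recovery when it reads back as zero.
              */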
 503        mtspr(SPRN_MCSR, mcsr);
 504        return mfspr(SPRN_MCSR) == 0 && recoverable;
 505}
 506
 507int machine_check_e500(struct pt_regs *regs)
 508{
 509        unsigned long reason = get_mc_reason(regs);
 510
 511        printk("Machine check in kernel mode.\n");
 512        printk("Caused by (from MCSR=%lx): ", reason);
 513
 514        if (reason & MCSR_MCP)
 515                printk("Machine Check Signal\n");
 516        if (reason & MCSR_ICPERR)
 517                printk("Instruction Cache Parity Error\n");
 518        if (reason & MCSR_DCP_PERR)
 519                printk("Data Cache Push Parity Error\n");
 520        if (reason & MCSR_DCPERR)
 521                printk("Data Cache Parity Error\n");
 522        if (reason & MCSR_BUS_IAERR)
 523                printk("Bus - Instruction Address Error\n");
 524        if (reason & MCSR_BUS_RAERR)
 525                printk("Bus - Read Address Error\n");
 526        if (reason & MCSR_BUS_WAERR)
 527                printk("Bus - Write Address Error\n");
 528        if (reason & MCSR_BUS_IBERR)
 529                printk("Bus - Instruction Data Error\n");
 530        if (reason & MCSR_BUS_RBERR)
 531                printk("Bus - Read Data Bus Error\n");
 532        if (reason & MCSR_BUS_WBERR)
  533                printk("Bus - Write Data Bus Error\n");
 534        if (reason & MCSR_BUS_IPERR)
 535                printk("Bus - Instruction Parity Error\n");
 536        if (reason & MCSR_BUS_RPERR)
 537                printk("Bus - Read Parity Error\n");
 538
 539        return 0;
 540}
 541
 542int machine_check_generic(struct pt_regs *regs)
 543{
 544        return 0;
 545}
 546#elif defined(CONFIG_E200)
 547int machine_check_e200(struct pt_regs *regs)
 548{
 549        unsigned long reason = get_mc_reason(regs);
 550
 551        printk("Machine check in kernel mode.\n");
 552        printk("Caused by (from MCSR=%lx): ", reason);
 553
 554        if (reason & MCSR_MCP)
 555                printk("Machine Check Signal\n");
 556        if (reason & MCSR_CP_PERR)
 557                printk("Cache Push Parity Error\n");
 558        if (reason & MCSR_CPERR)
 559                printk("Cache Parity Error\n");
 560        if (reason & MCSR_EXCP_ERR)
 561                printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
 562        if (reason & MCSR_BUS_IRERR)
 563                printk("Bus - Read Bus Error on instruction fetch\n");
 564        if (reason & MCSR_BUS_DRERR)
 565                printk("Bus - Read Bus Error on data load\n");
 566        if (reason & MCSR_BUS_WRERR)
 567                printk("Bus - Write Bus Error on buffered store or cache line push\n");
 568
 569        return 0;
 570}
 571#else
 572int machine_check_generic(struct pt_regs *regs)
 573{
 574        unsigned long reason = get_mc_reason(regs);
 575
 576        printk("Machine check in kernel mode.\n");
 577        printk("Caused by (from SRR1=%lx): ", reason);
 578        switch (reason & 0x601F0000) {
 579        case 0x80000:
 580                printk("Machine check signal\n");
 581                break;
 582        case 0:         /* for 601 */
 583        case 0x40000:
 584        case 0x140000:  /* 7450 MSS error and TEA */
 585                printk("Transfer error ack signal\n");
 586                break;
 587        case 0x20000:
 588                printk("Data parity error signal\n");
 589                break;
 590        case 0x10000:
 591                printk("Address parity error signal\n");
 592                break;
 593        case 0x20000000:
 594                printk("L1 Data Cache error\n");
 595                break;
 596        case 0x40000000:
 597                printk("L1 Instruction Cache error\n");
 598                break;
 599        case 0x00100000:
 600                printk("L2 data cache parity error\n");
 601                break;
 602        default:
 603                printk("Unknown values in msr\n");
 604        }
 605        return 0;
 606}
 607#endif /* everything else */
 608
 609void machine_check_exception(struct pt_regs *regs)
 610{
 611        int recover = 0;
 612
 613        __get_cpu_var(irq_stat).mce_exceptions++;
 614
  615        /* See if any machine dependent calls can handle this. In theory,
  616         * we would want to call the CPU handler first, and call the ppc_md
  617         * one if the CPU handler returns a positive number. However, there
  618         * is existing code that assumes the board gets a first chance, so
  619         * let's keep it that way for now and fix things later. --BenH.
 620         */
 621        if (ppc_md.machine_check_exception)
 622                recover = ppc_md.machine_check_exception(regs);
 623        else if (cur_cpu_spec->machine_check)
 624                recover = cur_cpu_spec->machine_check(regs);
 625
 626        if (recover > 0)
 627                return;
 628
 629#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
 630        /* the qspan pci read routines can cause machine checks -- Cort
 631         *
 632         * yuck !!! that totally needs to go away ! There are better ways
 633         * to deal with that than having a wart in the mcheck handler.
 634         * -- BenH
 635         */
 636        bad_page_fault(regs, regs->dar, SIGBUS);
 637        return;
 638#endif
 639
 640        if (debugger_fault_handler(regs))
 641                return;
 642
 643        if (check_io_access(regs))
 644                return;
 645
 646        die("Machine check", regs, SIGBUS);
 647
 648        /* Must die if the interrupt is not recoverable */
 649        if (!(regs->msr & MSR_RI))
 650                panic("Unrecoverable Machine check");
 651}
 652
 653void SMIException(struct pt_regs *regs)
 654{
 655        die("System Management Interrupt", regs, SIGABRT);
 656}
 657
 658void unknown_exception(struct pt_regs *regs)
 659{
 660        printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
 661               regs->nip, regs->msr, regs->trap);
 662
 663        _exception(SIGTRAP, regs, 0, 0);
 664}
 665
 666void instruction_breakpoint_exception(struct pt_regs *regs)
 667{
 668        if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
 669                                        5, SIGTRAP) == NOTIFY_STOP)
 670                return;
 671        if (debugger_iabr_match(regs))
 672                return;
 673        _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 674}
 675
 676void RunModeException(struct pt_regs *regs)
 677{
 678        _exception(SIGTRAP, regs, 0, 0);
 679}
 680
 681void __kprobes single_step_exception(struct pt_regs *regs)
 682{
 683        clear_single_step(regs);
 684
 685        if (notify_die(DIE_SSTEP, "single_step", regs, 5,
 686                                        5, SIGTRAP) == NOTIFY_STOP)
 687                return;
 688        if (debugger_sstep(regs))
 689                return;
 690
 691        _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
 692}
 693
 694/*
 695 * After we have successfully emulated an instruction, we have to
 696 * check if the instruction was being single-stepped, and if so,
 697 * pretend we got a single-step exception.  This was pointed out
 698 * by Kumar Gala.  -- paulus
 699 */
 700static void emulate_single_step(struct pt_regs *regs)
 701{
 702        if (single_stepping(regs))
 703                single_step_exception(regs);
 704}
 705
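     /*
      * Map the FPSCR state to an FPE_FLT* si_code, considering only
      * exceptions that are both enabled (the FPSCR_xE control bit) and
      * currently signalled (the FPSCR_xX status bit).
      */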
 706static inline int __parse_fpscr(unsigned long fpscr)
 707{
 708        int ret = 0;
 709
 710        /* Invalid operation */
 711        if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
 712                ret = FPE_FLTINV;
 713
 714        /* Overflow */
 715        else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
 716                ret = FPE_FLTOVF;
 717
 718        /* Underflow */
 719        else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
 720                ret = FPE_FLTUND;
 721
 722        /* Divide by zero */
 723        else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
 724                ret = FPE_FLTDIV;
 725
 726        /* Inexact result */
 727        else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
 728                ret = FPE_FLTRES;
 729
 730        return ret;
 731}
 732
 733static void parse_fpe(struct pt_regs *regs)
 734{
 735        int code = 0;
 736
 737        flush_fp_to_thread(current);
 738
 739        code = __parse_fpscr(current->thread.fpscr.val);
 740
 741        _exception(SIGFPE, regs, code, regs->nip);
 742}
 743
 744/*
 745 * Illegal instruction emulation support.  Originally written to
 746 * provide the PVR to user applications using the mfspr rd, PVR.
 747 * Return non-zero if we can't emulate, or -EFAULT if the associated
 748 * memory access caused an access fault.  Return zero on success.
 749 *
 750 * There are a couple of ways to do this, either "decode" the instruction
 751 * or directly match lots of bits.  In this case, matching lots of
 752 * bits is faster and easier.
 753 *
 754 */
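     /*
      * Emulate the load/store string instructions (lswi/lswx/stswi/stswx):
      * transfer num_bytes bytes between memory at EA and successive GPRs
      * starting at rT, packing four bytes per register (most significant
      * byte first) and wrapping from r31 back to r0.
      */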
 755static int emulate_string_inst(struct pt_regs *regs, u32 instword)
 756{
 757        u8 rT = (instword >> 21) & 0x1f;
 758        u8 rA = (instword >> 16) & 0x1f;
 759        u8 NB_RB = (instword >> 11) & 0x1f;
 760        u32 num_bytes;
 761        unsigned long EA;
 762        int pos = 0;
 763
 764        /* Early out if we are an invalid form of lswx */
 765        if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
 766                if ((rT == rA) || (rT == NB_RB))
 767                        return -EINVAL;
 768
 769        EA = (rA == 0) ? 0 : regs->gpr[rA];
 770
 771        switch (instword & PPC_INST_STRING_MASK) {
 772                case PPC_INST_LSWX:
 773                case PPC_INST_STSWX:
 774                        EA += NB_RB;
 775                        num_bytes = regs->xer & 0x7f;
 776                        break;
 777                case PPC_INST_LSWI:
 778                case PPC_INST_STSWI:
 779                        num_bytes = (NB_RB == 0) ? 32 : NB_RB;
 780                        break;
 781                default:
 782                        return -EINVAL;
 783        }
 784
 785        while (num_bytes != 0)
 786        {
 787                u8 val;
 788                u32 shift = 8 * (3 - (pos & 0x3));
 789
 790                switch ((instword & PPC_INST_STRING_MASK)) {
 791                        case PPC_INST_LSWX:
 792                        case PPC_INST_LSWI:
 793                                if (get_user(val, (u8 __user *)EA))
 794                                        return -EFAULT;
 795                                /* first time updating this reg,
 796                                 * zero it out */
 797                                if (pos == 0)
 798                                        regs->gpr[rT] = 0;
 799                                regs->gpr[rT] |= val << shift;
 800                                break;
 801                        case PPC_INST_STSWI:
 802                        case PPC_INST_STSWX:
 803                                val = regs->gpr[rT] >> shift;
 804                                if (put_user(val, (u8 __user *)EA))
 805                                        return -EFAULT;
 806                                break;
 807                }
 808                /* move EA to next address */
 809                EA += 1;
 810                num_bytes--;
 811
 812                /* manage our position within the register */
 813                if (++pos == 4) {
 814                        pos = 0;
 815                        if (++rT == 32)
 816                                rT = 0;
 817                }
 818        }
 819
 820        return 0;
 821}
 822
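     /*
      * Emulate popcntb: each byte of rA receives the number of 1-bits in
      * the corresponding byte of rS (e.g. a 0xb1 byte yields 4).  The
      * three mask-and-add steps below form 2-bit, 4-bit and then 8-bit
      * partial sums for all bytes in parallel, so no count ever crosses
      * a byte boundary.
      */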
 823static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
 824{
  825        u32 ra, rs;
 826        unsigned long tmp;
 827
 828        ra = (instword >> 16) & 0x1f;
 829        rs = (instword >> 21) & 0x1f;
 830
 831        tmp = regs->gpr[rs];
 832        tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
 833        tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
 834        tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
 835        regs->gpr[ra] = tmp;
 836
 837        return 0;
 838}
 839
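     /*
      * Emulate isel (integer select): rT = CR[BC] ? rA : rB, where rA
      * reads as zero when the rA field is 0.
      */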
 840static int emulate_isel(struct pt_regs *regs, u32 instword)
 841{
 842        u8 rT = (instword >> 21) & 0x1f;
 843        u8 rA = (instword >> 16) & 0x1f;
 844        u8 rB = (instword >> 11) & 0x1f;
 845        u8 BC = (instword >> 6) & 0x1f;
 846        u8 bit;
 847        unsigned long tmp;
 848
 849        tmp = (rA == 0) ? 0 : regs->gpr[rA];
 850        bit = (regs->ccr >> (31 - BC)) & 0x1;
 851
 852        regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
 853
 854        return 0;
 855}
 856
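     /*
      * Dispatch to the individual emulation helpers for the handful of
      * user-mode instructions we emulate: mfspr from the PVR, dcba,
      * mcrxr, the load/store string forms, popcntb and isel.
      */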
 857static int emulate_instruction(struct pt_regs *regs)
 858{
 859        u32 instword;
 860        u32 rd;
 861
 862        if (!user_mode(regs) || (regs->msr & MSR_LE))
 863                return -EINVAL;
 864        CHECK_FULL_REGS(regs);
 865
 866        if (get_user(instword, (u32 __user *)(regs->nip)))
 867                return -EFAULT;
 868
 869        /* Emulate the mfspr rD, PVR. */
 870        if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
 871                PPC_WARN_EMULATED(mfpvr, regs);
 872                rd = (instword >> 21) & 0x1f;
 873                regs->gpr[rd] = mfspr(SPRN_PVR);
 874                return 0;
 875        }
 876
 877        /* Emulating the dcba insn is just a no-op.  */
 878        if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
 879                PPC_WARN_EMULATED(dcba, regs);
 880                return 0;
 881        }
 882
 883        /* Emulate the mcrxr insn.  */
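             /*
              * mcrxr moves XER[SO,OV,CA] (the top nibble of the XER) into
              * CR field crfD and clears those bits in the XER; shift below
              * is 4 * crfD, so a single mask selects both the source
              * nibble and the destination CR field.
              */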
 884        if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
 885                int shift = (instword >> 21) & 0x1c;
 886                unsigned long msk = 0xf0000000UL >> shift;
 887
 888                PPC_WARN_EMULATED(mcrxr, regs);
 889                regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
 890                regs->xer &= ~0xf0000000UL;
 891                return 0;
 892        }
 893
 894        /* Emulate load/store string insn. */
 895        if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
 896                PPC_WARN_EMULATED(string, regs);
 897                return emulate_string_inst(regs, instword);
 898        }
 899
 900        /* Emulate the popcntb (Population Count Bytes) instruction. */
 901        if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
 902                PPC_WARN_EMULATED(popcntb, regs);
 903                return emulate_popcntb_inst(regs, instword);
 904        }
 905
 906        /* Emulate isel (Integer Select) instruction */
 907        if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
 908                PPC_WARN_EMULATED(isel, regs);
 909                return emulate_isel(regs, instword);
 910        }
 911
 912        return -EINVAL;
 913}
 914
 915int is_valid_bugaddr(unsigned long addr)
 916{
 917        return is_kernel_addr(addr);
 918}
 919
 920void __kprobes program_check_exception(struct pt_regs *regs)
 921{
 922        unsigned int reason = get_reason(regs);
 923        extern int do_mathemu(struct pt_regs *regs);
 924
 925        /* We can now get here via a FP Unavailable exception if the core
  926         * has no FPU; in that case the reason flags will be 0. */
 927
 928        if (reason & REASON_FP) {
 929                /* IEEE FP exception */
 930                parse_fpe(regs);
 931                return;
 932        }
 933        if (reason & REASON_TRAP) {
 934                /* Debugger is first in line to stop recursive faults in
 935                 * rcu_lock, notify_die, or atomic_notifier_call_chain */
 936                if (debugger_bpt(regs))
 937                        return;
 938
 939                /* trap exception */
 940                if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
 941                                == NOTIFY_STOP)
 942                        return;
 943
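                     /*
                      * Kernel-mode trap instructions are how BUG()/WARN()
                      * are implemented; if report_bug() says this one was
                      * only a warning, step over the trap and continue.
                      */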
 944                if (!(regs->msr & MSR_PR) &&  /* not user-mode */
 945                    report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
 946                        regs->nip += 4;
 947                        return;
 948                }
 949                _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
 950                return;
 951        }
 952
 953        local_irq_enable();
 954
 955#ifdef CONFIG_MATH_EMULATION
 956        /* (reason & REASON_ILLEGAL) would be the obvious thing here,
 957         * but there seems to be a hardware bug on the 405GP (RevD)
 958         * that means ESR is sometimes set incorrectly - either to
 959         * ESR_DST (!?) or 0.  In the process of chasing this with the
 960         * hardware people - not sure if it can happen on any illegal
 961         * instruction or only on FP instructions, whether there is a
  962         * pattern to occurrences etc. -dgibson 31/Mar/2003 */
 963        switch (do_mathemu(regs)) {
 964        case 0:
 965                emulate_single_step(regs);
 966                return;
 967        case 1: {
 968                        int code = 0;
 969                        code = __parse_fpscr(current->thread.fpscr.val);
 970                        _exception(SIGFPE, regs, code, regs->nip);
 971                        return;
 972                }
 973        case -EFAULT:
 974                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
 975                return;
 976        }
 977        /* fall through on any other errors */
 978#endif /* CONFIG_MATH_EMULATION */
 979
 980        /* Try to emulate it if we should. */
 981        if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
 982                switch (emulate_instruction(regs)) {
 983                case 0:
 984                        regs->nip += 4;
 985                        emulate_single_step(regs);
 986                        return;
 987                case -EFAULT:
 988                        _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
 989                        return;
 990                }
 991        }
 992
 993        if (reason & REASON_PRIVILEGED)
 994                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
 995        else
 996                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 997}
 998
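     /*
      * fix_alignment() returns 1 if it emulated the unaligned access,
      * -EFAULT if the operand address was bad, and 0 (or another error)
      * if it could not handle the instruction; anything other than 1
      * ends up as a SIGBUS or SIGSEGV below.
      */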
 999void alignment_exception(struct pt_regs *regs)
1000{
1001        int sig, code, fixed = 0;
1002
1003        /* we don't implement logging of alignment exceptions */
1004        if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1005                fixed = fix_alignment(regs);
1006
1007        if (fixed == 1) {
1008                regs->nip += 4; /* skip over emulated instruction */
1009                emulate_single_step(regs);
1010                return;
1011        }
1012
1013        /* Operand address was bad */
1014        if (fixed == -EFAULT) {
1015                sig = SIGSEGV;
1016                code = SEGV_ACCERR;
1017        } else {
1018                sig = SIGBUS;
1019                code = BUS_ADRALN;
1020        }
1021        if (user_mode(regs))
1022                _exception(sig, regs, code, regs->dar);
1023        else
1024                bad_page_fault(regs, regs->dar, sig);
1025}
1026
1027void StackOverflow(struct pt_regs *regs)
1028{
1029        printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1030               current, regs->gpr[1]);
1031        debugger(regs);
1032        show_regs(regs);
1033        panic("kernel stack overflow");
1034}
1035
1036void nonrecoverable_exception(struct pt_regs *regs)
1037{
1038        printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1039               regs->nip, regs->msr);
1040        debugger(regs);
1041        die("nonrecoverable exception", regs, SIGKILL);
1042}
1043
1044void trace_syscall(struct pt_regs *regs)
1045{
1046        printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld    %s\n",
1047               current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
1048               regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
1049}
1050
1051void kernel_fp_unavailable_exception(struct pt_regs *regs)
1052{
1053        printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1054                          "%lx at %lx\n", regs->trap, regs->nip);
1055        die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1056}
1057
1058void altivec_unavailable_exception(struct pt_regs *regs)
1059{
1060        if (user_mode(regs)) {
1061                /* A user program has executed an altivec instruction,
1062                   but this kernel doesn't support altivec. */
1063                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1064                return;
1065        }
1066
1067        printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1068                        "%lx at %lx\n", regs->trap, regs->nip);
1069        die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1070}
1071
1072void vsx_unavailable_exception(struct pt_regs *regs)
1073{
1074        if (user_mode(regs)) {
 1075                /* A user program has executed a vsx instruction,
1076                   but this kernel doesn't support vsx. */
1077                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1078                return;
1079        }
1080
1081        printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1082                        "%lx at %lx\n", regs->trap, regs->nip);
1083        die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1084}
1085
1086void performance_monitor_exception(struct pt_regs *regs)
1087{
1088        __get_cpu_var(irq_stat).pmu_irqs++;
1089
1090        perf_irq(regs);
1091}
1092
1093#ifdef CONFIG_8xx
1094void SoftwareEmulation(struct pt_regs *regs)
1095{
1096        extern int do_mathemu(struct pt_regs *);
1097        extern int Soft_emulate_8xx(struct pt_regs *);
1098#if defined(CONFIG_MATH_EMULATION) || defined(CONFIG_8XX_MINIMAL_FPEMU)
1099        int errcode;
1100#endif
1101
1102        CHECK_FULL_REGS(regs);
1103
1104        if (!user_mode(regs)) {
1105                debugger(regs);
1106                die("Kernel Mode Software FPU Emulation", regs, SIGFPE);
1107        }
1108
1109#ifdef CONFIG_MATH_EMULATION
1110        errcode = do_mathemu(regs);
1111        if (errcode >= 0)
1112                PPC_WARN_EMULATED(math, regs);
1113
1114        switch (errcode) {
1115        case 0:
1116                emulate_single_step(regs);
1117                return;
1118        case 1: {
1119                        int code = 0;
1120                        code = __parse_fpscr(current->thread.fpscr.val);
1121                        _exception(SIGFPE, regs, code, regs->nip);
1122                        return;
1123                }
1124        case -EFAULT:
1125                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1126                return;
1127        default:
1128                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1129                return;
1130        }
1131
1132#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
1133        errcode = Soft_emulate_8xx(regs);
1134        if (errcode >= 0)
1135                PPC_WARN_EMULATED(8xx, regs);
1136
1137        switch (errcode) {
1138        case 0:
1139                emulate_single_step(regs);
1140                return;
1141        case 1:
1142                _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1143                return;
1144        case -EFAULT:
1145                _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1146                return;
1147        }
1148#else
1149        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1150#endif
1151}
1152#endif /* CONFIG_8xx */
1153
1154#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1155static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1156{
1157        int changed = 0;
1158        /*
1159         * Determine the cause of the debug event, clear the
1160         * event flags and send a trap to the handler. Torez
1161         */
1162        if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1163                dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1164#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1165                current->thread.dbcr2 &= ~DBCR2_DAC12MODE;
1166#endif
1167                do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
1168                             5);
1169                changed |= 0x01;
1170        }  else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1171                dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1172                do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
1173                             6);
1174                changed |= 0x01;
1175        }  else if (debug_status & DBSR_IAC1) {
1176                current->thread.dbcr0 &= ~DBCR0_IAC1;
1177                dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1178                do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1179                             1);
1180                changed |= 0x01;
1181        }  else if (debug_status & DBSR_IAC2) {
1182                current->thread.dbcr0 &= ~DBCR0_IAC2;
1183                do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
1184                             2);
1185                changed |= 0x01;
1186        }  else if (debug_status & DBSR_IAC3) {
1187                current->thread.dbcr0 &= ~DBCR0_IAC3;
1188                dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1189                do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
1190                             3);
1191                changed |= 0x01;
1192        }  else if (debug_status & DBSR_IAC4) {
1193                current->thread.dbcr0 &= ~DBCR0_IAC4;
1194                do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
1195                             4);
1196                changed |= 0x01;
1197        }
1198        /*
1199         * At the point this routine was called, the MSR(DE) was turned off.
1200         * Check all other debug flags and see if that bit needs to be turned
1201         * back on or not.
1202         */
1203        if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0, current->thread.dbcr1))
1204                regs->msr |= MSR_DE;
1205        else
1206                /* Make sure the IDM flag is off */
1207                current->thread.dbcr0 &= ~DBCR0_IDM;
1208
1209        if (changed & 0x01)
1210                mtspr(SPRN_DBCR0, current->thread.dbcr0);
1211}
1212
1213void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
1214{
1215        current->thread.dbsr = debug_status;
1216
1217        /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1218         * on server, it stops on the target of the branch. In order to simulate
1219         * the server behaviour, we thus restart right away with a single step
1220         * instead of stopping here when hitting a BT
1221         */
1222        if (debug_status & DBSR_BT) {
1223                regs->msr &= ~MSR_DE;
1224
1225                /* Disable BT */
1226                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1227                /* Clear the BT event */
1228                mtspr(SPRN_DBSR, DBSR_BT);
1229
1230                /* Do the single step trick only when coming from userspace */
1231                if (user_mode(regs)) {
1232                        current->thread.dbcr0 &= ~DBCR0_BT;
1233                        current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1234                        regs->msr |= MSR_DE;
1235                        return;
1236                }
1237
1238                if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1239                               5, SIGTRAP) == NOTIFY_STOP) {
1240                        return;
1241                }
1242                if (debugger_sstep(regs))
1243                        return;
1244        } else if (debug_status & DBSR_IC) {    /* Instruction complete */
1245                regs->msr &= ~MSR_DE;
1246
1247                /* Disable instruction completion */
1248                mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1249                /* Clear the instruction completion event */
1250                mtspr(SPRN_DBSR, DBSR_IC);
1251
1252                if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1253                               5, SIGTRAP) == NOTIFY_STOP) {
1254                        return;
1255                }
1256
1257                if (debugger_sstep(regs))
1258                        return;
1259
1260                if (user_mode(regs)) {
1261                        current->thread.dbcr0 &= ~DBCR0_IC;
1262#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1263                        if (DBCR_ACTIVE_EVENTS(current->thread.dbcr0,
1264                                               current->thread.dbcr1))
1265                                regs->msr |= MSR_DE;
1266                        else
1267                                /* Make sure the IDM bit is off */
1268                                current->thread.dbcr0 &= ~DBCR0_IDM;
1269#endif
1270                }
1271
1272                _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1273        } else
1274                handle_debug(regs, debug_status);
1275}
1276#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1277
1278#if !defined(CONFIG_TAU_INT)
1279void TAUException(struct pt_regs *regs)
1280{
1281        printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx    %s\n",
1282               regs->nip, regs->msr, regs->trap, print_tainted());
1283}
 1284#endif /* CONFIG_TAU_INT */
1285
1286#ifdef CONFIG_ALTIVEC
1287void altivec_assist_exception(struct pt_regs *regs)
1288{
1289        int err;
1290
1291        if (!user_mode(regs)) {
1292                printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1293                       " at %lx\n", regs->nip);
1294                die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1295        }
1296
1297        flush_altivec_to_thread(current);
1298
1299        PPC_WARN_EMULATED(altivec, regs);
1300        err = emulate_altivec(regs);
1301        if (err == 0) {
1302                regs->nip += 4;         /* skip emulated instruction */
1303                emulate_single_step(regs);
1304                return;
1305        }
1306
1307        if (err == -EFAULT) {
1308                /* got an error reading the instruction */
1309                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1310        } else {
1311                /* didn't recognize the instruction */
1312                /* XXX quick hack for now: set the non-Java bit in the VSCR */
1313                if (printk_ratelimit())
1314                        printk(KERN_ERR "Unrecognized altivec instruction "
1315                               "in %s at %lx\n", current->comm, regs->nip);
1316                current->thread.vscr.u[3] |= 0x10000;
1317        }
1318}
1319#endif /* CONFIG_ALTIVEC */
1320
1321#ifdef CONFIG_VSX
1322void vsx_assist_exception(struct pt_regs *regs)
1323{
1324        if (!user_mode(regs)) {
1325                printk(KERN_EMERG "VSX assist exception in kernel mode"
1326                       " at %lx\n", regs->nip);
1327                die("Kernel VSX assist exception", regs, SIGILL);
1328        }
1329
1330        flush_vsx_to_thread(current);
1331        printk(KERN_INFO "VSX assist not supported at %lx\n", regs->nip);
1332        _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1333}
1334#endif /* CONFIG_VSX */
1335
1336#ifdef CONFIG_FSL_BOOKE
1337void CacheLockingException(struct pt_regs *regs, unsigned long address,
1338                           unsigned long error_code)
1339{
1340        /* We treat cache locking instructions from the user
1341         * as priv ops, in the future we could try to do
1342         * something smarter
1343         */
1344        if (error_code & (ESR_DLK|ESR_ILK))
1345                _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1346        return;
1347}
1348#endif /* CONFIG_FSL_BOOKE */
1349
1350#ifdef CONFIG_SPE
1351void SPEFloatingPointException(struct pt_regs *regs)
1352{
1353        extern int do_spe_mathemu(struct pt_regs *regs);
1354        unsigned long spefscr;
1355        int fpexc_mode;
1356        int code = 0;
1357        int err;
1358
1359        preempt_disable();
1360        if (regs->msr & MSR_SPE)
1361                giveup_spe(current);
1362        preempt_enable();
1363
1364        spefscr = current->thread.spefscr;
1365        fpexc_mode = current->thread.fpexc_mode;
1366
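             /*
              * Translate the SPEFSCR status bits into an si_code, but only
              * for the exception classes the task asked to receive via its
              * prctl(PR_SET_FPEXC) fpexc_mode.
              */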
1367        if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1368                code = FPE_FLTOVF;
1369        }
1370        else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1371                code = FPE_FLTUND;
1372        }
1373        else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1374                code = FPE_FLTDIV;
1375        else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1376                code = FPE_FLTINV;
1377        }
1378        else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1379                code = FPE_FLTRES;
1380
1381        err = do_spe_mathemu(regs);
1382        if (err == 0) {
1383                regs->nip += 4;         /* skip emulated instruction */
1384                emulate_single_step(regs);
1385                return;
1386        }
1387
1388        if (err == -EFAULT) {
1389                /* got an error reading the instruction */
1390                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1391        } else if (err == -EINVAL) {
1392                /* didn't recognize the instruction */
1393                printk(KERN_ERR "unrecognized spe instruction "
1394                       "in %s at %lx\n", current->comm, regs->nip);
1395        } else {
1396                _exception(SIGFPE, regs, code, regs->nip);
1397        }
1398
1399        return;
1400}
1401
1402void SPEFloatingPointRoundException(struct pt_regs *regs)
1403{
1404        extern int speround_handler(struct pt_regs *regs);
1405        int err;
1406
1407        preempt_disable();
1408        if (regs->msr & MSR_SPE)
1409                giveup_spe(current);
1410        preempt_enable();
1411
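             /*
              * The round exception is taken after the offending instruction
              * has completed, so NIP already points past it; back up so
              * speround_handler() can examine the instruction (NIP is
              * re-advanced below on success).
              */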
1412        regs->nip -= 4;
1413        err = speround_handler(regs);
1414        if (err == 0) {
1415                regs->nip += 4;         /* skip emulated instruction */
1416                emulate_single_step(regs);
1417                return;
1418        }
1419
1420        if (err == -EFAULT) {
1421                /* got an error reading the instruction */
1422                _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1423        } else if (err == -EINVAL) {
1424                /* didn't recognize the instruction */
1425                printk(KERN_ERR "unrecognized spe instruction "
1426                       "in %s at %lx\n", current->comm, regs->nip);
1427        } else {
1428                _exception(SIGFPE, regs, 0, regs->nip);
1429                return;
1430        }
1431}
1432#endif
1433
1434/*
1435 * We enter here if we get an unrecoverable exception, that is, one
1436 * that happened at a point where the RI (recoverable interrupt) bit
1437 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
1438 * we therefore lost state by taking this exception.
1439 */
1440void unrecoverable_exception(struct pt_regs *regs)
1441{
1442        printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1443               regs->trap, regs->nip);
1444        die("Unrecoverable exception", regs, SIGABRT);
1445}
1446
1447#ifdef CONFIG_BOOKE_WDT
1448/*
1449 * Default handler for a Watchdog exception,
1450 * spins until a reboot occurs
1451 */
1452void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1453{
1454        /* Generic WatchdogHandler, implement your own */
 1455        mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
1456        return;
1457}
1458
1459void WatchdogException(struct pt_regs *regs)
1460{
 1461        printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1462        WatchdogHandler(regs);
1463}
1464#endif
1465
1466/*
1467 * We enter here if we discover during exception entry that we are
1468 * running in supervisor mode with a userspace value in the stack pointer.
1469 */
1470void kernel_bad_stack(struct pt_regs *regs)
1471{
1472        printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1473               regs->gpr[1], regs->nip);
1474        die("Bad kernel stack pointer", regs, SIGABRT);
1475}
1476
1477void __init trap_init(void)
1478{
1479}
1480
1481
1482#ifdef CONFIG_PPC_EMULATED_STATS
1483
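     /*
      * One named counter per emulated instruction class: PPC_WARN_EMULATED()
      * (see asm/emulated_ops.h) bumps the matching entry and, when the
      * do_warn switch below is set, prints the rate-limited warning.
      */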
1484#define WARN_EMULATED_SETUP(type)       .type = { .name = #type }
1485
1486struct ppc_emulated ppc_emulated = {
1487#ifdef CONFIG_ALTIVEC
1488        WARN_EMULATED_SETUP(altivec),
1489#endif
1490        WARN_EMULATED_SETUP(dcba),
1491        WARN_EMULATED_SETUP(dcbz),
1492        WARN_EMULATED_SETUP(fp_pair),
1493        WARN_EMULATED_SETUP(isel),
1494        WARN_EMULATED_SETUP(mcrxr),
1495        WARN_EMULATED_SETUP(mfpvr),
1496        WARN_EMULATED_SETUP(multiple),
1497        WARN_EMULATED_SETUP(popcntb),
1498        WARN_EMULATED_SETUP(spe),
1499        WARN_EMULATED_SETUP(string),
1500        WARN_EMULATED_SETUP(unaligned),
1501#ifdef CONFIG_MATH_EMULATION
1502        WARN_EMULATED_SETUP(math),
1503#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
1504        WARN_EMULATED_SETUP(8xx),
1505#endif
1506#ifdef CONFIG_VSX
1507        WARN_EMULATED_SETUP(vsx),
1508#endif
1509};
1510
1511u32 ppc_warn_emulated;
1512
1513void ppc_warn_emulated_print(const char *type)
1514{
1515        if (printk_ratelimit())
1516                pr_warning("%s used emulated %s instruction\n", current->comm,
1517                           type);
1518}
1519
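     /*
      * Expose the counters as debugfs files under
      * powerpc/emulated_instructions/: one u32 per instruction class plus
      * a "do_warn" file that enables the rate-limited warnings above.
      */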
1520static int __init ppc_warn_emulated_init(void)
1521{
1522        struct dentry *dir, *d;
1523        unsigned int i;
1524        struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1525
1526        if (!powerpc_debugfs_root)
1527                return -ENODEV;
1528
1529        dir = debugfs_create_dir("emulated_instructions",
1530                                 powerpc_debugfs_root);
1531        if (!dir)
1532                return -ENOMEM;
1533
1534        d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1535                               &ppc_warn_emulated);
1536        if (!d)
1537                goto fail;
1538
1539        for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1540                d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1541                                       (u32 *)&entries[i].val.counter);
1542                if (!d)
1543                        goto fail;
1544        }
1545
1546        return 0;
1547
1548fail:
1549        debugfs_remove_recursive(dir);
1550        return -ENOMEM;
1551}
1552
1553device_initcall(ppc_warn_emulated_init);
1554
1555#endif /* CONFIG_PPC_EMULATED_STATS */
1556