linux/arch/sparc/kernel/traps_64.c
<<
>>
Prefs
   1/* arch/sparc64/kernel/traps.c
   2 *
   3 * Copyright (C) 1995,1997,2008,2009,2012 David S. Miller (davem@davemloft.net)
   4 * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com)
   5 */
   6
   7/*
   8 * I like traps on v9, :))))
   9 */
  10
  11#include <linux/module.h>
  12#include <linux/sched.h>
  13#include <linux/linkage.h>
  14#include <linux/kernel.h>
  15#include <linux/signal.h>
  16#include <linux/smp.h>
  17#include <linux/mm.h>
  18#include <linux/init.h>
  19#include <linux/kdebug.h>
  20#include <linux/ftrace.h>
  21#include <linux/reboot.h>
  22#include <linux/gfp.h>
  23
  24#include <asm/smp.h>
  25#include <asm/delay.h>
  26#include <asm/ptrace.h>
  27#include <asm/oplib.h>
  28#include <asm/page.h>
  29#include <asm/pgtable.h>
  30#include <asm/unistd.h>
  31#include <asm/uaccess.h>
  32#include <asm/fpumacro.h>
  33#include <asm/lsu.h>
  34#include <asm/dcu.h>
  35#include <asm/estate.h>
  36#include <asm/chafsr.h>
  37#include <asm/sfafsr.h>
  38#include <asm/psrcompat.h>
  39#include <asm/processor.h>
  40#include <asm/timer.h>
  41#include <asm/head.h>
  42#include <asm/prom.h>
  43#include <asm/memctrl.h>
  44#include <asm/cacheflush.h>
  45
  46#include "entry.h"
  47#include "kstack.h"
  48
  49/* When an irrecoverable trap occurs at tl > 0, the trap entry
  50 * code logs the trap state registers at every level in the trap
  51 * stack.  It is found at (pt_regs + sizeof(pt_regs)) and the layout
  52 * is as follows:
  53 */
struct tl1_traplog {
	struct {
		unsigned long tstate;	/* saved TSTATE at this trap level */
		unsigned long tpc;	/* saved trap PC */
		unsigned long tnpc;	/* saved trap next-PC */
		unsigned long tt;	/* saved trap type (TT) register */
	} trapstack[4];			/* one entry per logged trap level */
	unsigned long tl;		/* trap level at which the error occurred */
};
  63
  64static void dump_tl1_traplog(struct tl1_traplog *p)
  65{
  66        int i, limit;
  67
  68        printk(KERN_EMERG "TRAPLOG: Error at trap level 0x%lx, "
  69               "dumping track stack.\n", p->tl);
  70
  71        limit = (tlb_type == hypervisor) ? 2 : 4;
  72        for (i = 0; i < limit; i++) {
  73                printk(KERN_EMERG
  74                       "TRAPLOG: Trap level %d TSTATE[%016lx] TPC[%016lx] "
  75                       "TNPC[%016lx] TT[%lx]\n",
  76                       i + 1,
  77                       p->trapstack[i].tstate, p->trapstack[i].tpc,
  78                       p->trapstack[i].tnpc, p->trapstack[i].tt);
  79                printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
  80        }
  81}
  82
  83void bad_trap(struct pt_regs *regs, long lvl)
  84{
  85        char buffer[32];
  86        siginfo_t info;
  87
  88        if (notify_die(DIE_TRAP, "bad trap", regs,
  89                       0, lvl, SIGTRAP) == NOTIFY_STOP)
  90                return;
  91
  92        if (lvl < 0x100) {
  93                sprintf(buffer, "Bad hw trap %lx at tl0\n", lvl);
  94                die_if_kernel(buffer, regs);
  95        }
  96
  97        lvl -= 0x100;
  98        if (regs->tstate & TSTATE_PRIV) {
  99                sprintf(buffer, "Kernel bad sw trap %lx", lvl);
 100                die_if_kernel(buffer, regs);
 101        }
 102        if (test_thread_flag(TIF_32BIT)) {
 103                regs->tpc &= 0xffffffff;
 104                regs->tnpc &= 0xffffffff;
 105        }
 106        info.si_signo = SIGILL;
 107        info.si_errno = 0;
 108        info.si_code = ILL_ILLTRP;
 109        info.si_addr = (void __user *)regs->tpc;
 110        info.si_trapno = lvl;
 111        force_sig_info(SIGILL, &info, current);
 112}
 113
/* Like bad_trap(), but for unhandled traps taken at trap level > 0.
 * Dumps the saved trap-level log and then dies; there is no user
 * signal path here.
 */
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
	char buffer[32];

	if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
		       0, lvl, SIGTRAP) == NOTIFY_STOP)
		return;

	/* Trap entry code stored the TL>0 state right after pt_regs. */
	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	sprintf (buffer, "Bad trap %lx at tl>0", lvl);
	die_if_kernel (buffer, regs);
}
 127
#ifdef CONFIG_DEBUG_BUGVERBOSE
/* Called from the BUG() machinery to report the file/line of the
 * BUG site before the trap is delivered.
 */
void do_BUG(const char *file, int line)
{
	/* Make sure the message gets out even if a lock is held. */
	bust_spinlocks(1);
	printk("kernel BUG at %s:%d!\n", file, line);
}
EXPORT_SYMBOL(do_BUG);
#endif
 136
/* Protects installation/removal and invocation of dimm_handler. */
static DEFINE_SPINLOCK(dimm_handler_lock);
/* Optional platform hook mapping (syndrome, paddr) to a DIMM label. */
static dimm_printer_t dimm_handler;
 139
 140static int sprintf_dimm(int synd_code, unsigned long paddr, char *buf, int buflen)
 141{
 142        unsigned long flags;
 143        int ret = -ENODEV;
 144
 145        spin_lock_irqsave(&dimm_handler_lock, flags);
 146        if (dimm_handler) {
 147                ret = dimm_handler(synd_code, paddr, buf, buflen);
 148        } else if (tlb_type == spitfire) {
 149                if (prom_getunumber(synd_code, paddr, buf, buflen) == -1)
 150                        ret = -EINVAL;
 151                else
 152                        ret = 0;
 153        } else
 154                ret = -ENODEV;
 155        spin_unlock_irqrestore(&dimm_handler_lock, flags);
 156
 157        return ret;
 158}
 159
 160int register_dimm_printer(dimm_printer_t func)
 161{
 162        unsigned long flags;
 163        int ret = 0;
 164
 165        spin_lock_irqsave(&dimm_handler_lock, flags);
 166        if (!dimm_handler)
 167                dimm_handler = func;
 168        else
 169                ret = -EEXIST;
 170        spin_unlock_irqrestore(&dimm_handler_lock, flags);
 171
 172        return ret;
 173}
 174EXPORT_SYMBOL_GPL(register_dimm_printer);
 175
 176void unregister_dimm_printer(dimm_printer_t func)
 177{
 178        unsigned long flags;
 179
 180        spin_lock_irqsave(&dimm_handler_lock, flags);
 181        if (dimm_handler == func)
 182                dimm_handler = NULL;
 183        spin_unlock_irqrestore(&dimm_handler_lock, flags);
 184}
 185EXPORT_SYMBOL_GPL(unregister_dimm_printer);
 186
/* Spitfire instruction access exception (trap type 0x8).
 * Fatal in privileged mode; otherwise delivers SIGSEGV/SEGV_MAPERR
 * to the user task with si_addr set to the faulting PC.
 */
void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Log the fault status/address registers before dying. */
		printk("spitfire_insn_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Iax", regs);
	}
	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: PCs are only 32 bits wide. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)regs->tpc;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
 211
/* TL>0 variant: dump the trap-level log, then handle as at TL0. */
void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_insn_access_exception(regs, sfsr, sfar);
}
 221
/* sun4v instruction access exception.
 * type_ctx packs the hypervisor fault type in the upper 16 bits and
 * the MMU context in the lower 16 bits.  Fatal in privileged mode;
 * otherwise SIGSEGV/SEGV_MAPERR with si_addr = faulting address.
 */
void sun4v_insn_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "instruction access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		printk("sun4v_insn_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Iax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: PCs are only 32 bits wide. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
 250
/* TL>0 variant: dump the trap-level log, then handle as at TL0. */
void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "instruction access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_insn_access_exception(regs, addr, type_ctx);
}
 260
/* Spitfire data access exception (trap type 0x30).
 * In privileged mode, a fault inside a uaccess region is handled by
 * branching to the exception-table fixup; any other kernel fault is
 * fatal.  User faults get SIGSEGV with si_addr = faulting address.
 *
 * NOTE(review): unlike the insn-access and sun4v handlers, this one
 * does not truncate tpc/tnpc for TIF_32BIT tasks before delivering
 * the signal — presumably intentional, but worth confirming.
 */
void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup stub instead of faulting again. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		/* Shit... */
		printk("spitfire_data_access_exception: SFSR[%016lx] "
		       "SFAR[%016lx], going.\n", sfsr, sfar);
		die_if_kernel("Dax", regs);
	}

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *)sfar;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
 298
/* TL>0 variant: dump the trap-level log, then handle as at TL0. */
void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x30, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	spitfire_data_access_exception(regs, sfsr, sfar);
}
 308
/* sun4v data access exception.
 * type_ctx packs the hypervisor fault type (upper 16 bits) and MMU
 * context (lower 16 bits).  Kernel faults inside uaccess regions are
 * redirected to the exception-table fixup; other kernel faults are
 * fatal.  User faults get SIGSEGV with si_addr = faulting address.
 */
void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	unsigned short type = (type_ctx >> 16);
	unsigned short ctx  = (type_ctx & 0xffff);
	siginfo_t info;

	if (notify_die(DIE_TRAP, "data access exception", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	if (regs->tstate & TSTATE_PRIV) {
		/* Test if this comes from uaccess places. */
		const struct exception_table_entry *entry;

		entry = search_exception_tables(regs->tpc);
		if (entry) {
			/* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
			printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
			printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
			       regs->tpc, entry->fixup);
#endif
			/* Resume at the fixup stub instead of faulting again. */
			regs->tpc = entry->fixup;
			regs->tnpc = regs->tpc + 4;
			return;
		}
		printk("sun4v_data_access_exception: ADDR[%016lx] "
		       "CTX[%04x] TYPE[%04x], going.\n",
		       addr, ctx, type);
		die_if_kernel("Dax", regs);
	}

	if (test_thread_flag(TIF_32BIT)) {
		/* Compat task: PCs are only 32 bits wide. */
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_MAPERR;
	info.si_addr = (void __user *) addr;
	info.si_trapno = 0;
	force_sig_info(SIGSEGV, &info, current);
}
 352
/* TL>0 variant: dump the trap-level log, then handle as at TL0. */
void sun4v_data_access_exception_tl1(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
{
	if (notify_die(DIE_TRAP_TL1, "data access exception tl1", regs,
		       0, 0x8, SIGTRAP) == NOTIFY_STOP)
		return;

	dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
	sun4v_data_access_exception(regs, addr, type_ctx);
}
 362
 363#ifdef CONFIG_PCI
 364#include "pci_impl.h"
 365#endif
 366
 367/* When access exceptions happen, we must do this. */
static void spitfire_clean_and_reenable_l1_caches(void)
{
	unsigned long va;

	/* Only spitfire-class chips use this recovery path. */
	if (tlb_type != spitfire)
		BUG();

	/* Clean 'em.  Zero the I-cache and D-cache tags over the whole
	 * tag address space (2 pages worth, 32 bytes per line).
	 */
	for (va =  0; va < (PAGE_SIZE << 1); va += 32) {
		spitfire_put_icache_tag(va, 0x0);
		spitfire_put_dcache_tag(va, 0x0);
	}

	/* Re-enable in LSU.  Flush, then store the IC/DC enable and
	 * IM/DM bits back into the LSU control register via ASI.
	 */
	__asm__ __volatile__("flush %%g6\n\t"
			     "membar #Sync\n\t"
			     "stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (LSU_CONTROL_IC | LSU_CONTROL_DC |
				    LSU_CONTROL_IM | LSU_CONTROL_DM),
			     "i" (ASI_LSU_CONTROL)
			     : "memory");
}
 392
/* Re-arm all E-state error reporting by writing ESTATE_ERR_ALL into
 * the E-state error enable register (the trap may have disabled it).
 */
static void spitfire_enable_estate_errors(void)
{
	__asm__ __volatile__("stxa	%0, [%%g0] %1\n\t"
			     "membar	#Sync"
			     : /* no outputs */
			     : "r" (ESTATE_ERR_ALL),
			       "i" (ASI_ESTATE_ERROR_EN));
}
 401
/* Maps the low 8 bits of a UDB error register (the ECC syndrome) to a
 * syndrome code passed to sprintf_dimm()/prom_getunumber().
 * NOTE(review): values appear to identify the failing bit, with codes
 * >= 0x48 marking multi-bit/invalid syndromes — confirm against the
 * UltraSPARC user's manual.
 */
static char ecc_syndrome_table[] = {
	0x4c, 0x40, 0x41, 0x48, 0x42, 0x48, 0x48, 0x49,
	0x43, 0x48, 0x48, 0x49, 0x48, 0x49, 0x49, 0x4a,
	0x44, 0x48, 0x48, 0x20, 0x48, 0x39, 0x4b, 0x48,
	0x48, 0x25, 0x31, 0x48, 0x28, 0x48, 0x48, 0x2c,
	0x45, 0x48, 0x48, 0x21, 0x48, 0x3d, 0x04, 0x48,
	0x48, 0x4b, 0x35, 0x48, 0x2d, 0x48, 0x48, 0x29,
	0x48, 0x00, 0x01, 0x48, 0x0a, 0x48, 0x48, 0x4b,
	0x0f, 0x48, 0x48, 0x4b, 0x48, 0x49, 0x49, 0x48,
	0x46, 0x48, 0x48, 0x2a, 0x48, 0x3b, 0x27, 0x48,
	0x48, 0x4b, 0x33, 0x48, 0x22, 0x48, 0x48, 0x2e,
	0x48, 0x19, 0x1d, 0x48, 0x1b, 0x4a, 0x48, 0x4b,
	0x1f, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x48, 0x4b, 0x24, 0x48, 0x07, 0x48, 0x48, 0x36,
	0x4b, 0x48, 0x48, 0x3e, 0x48, 0x30, 0x38, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x16, 0x48,
	0x48, 0x12, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x47, 0x48, 0x48, 0x2f, 0x48, 0x3f, 0x4b, 0x48,
	0x48, 0x06, 0x37, 0x48, 0x23, 0x48, 0x48, 0x2b,
	0x48, 0x05, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x32,
	0x26, 0x48, 0x48, 0x3a, 0x48, 0x34, 0x3c, 0x48,
	0x48, 0x11, 0x15, 0x48, 0x13, 0x4a, 0x48, 0x4b,
	0x17, 0x48, 0x4a, 0x4b, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x1e, 0x48,
	0x48, 0x1a, 0x4b, 0x48, 0x49, 0x48, 0x48, 0x4b,
	0x48, 0x08, 0x0d, 0x48, 0x02, 0x48, 0x48, 0x49,
	0x03, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x4b, 0x48,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x10, 0x48,
	0x48, 0x14, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x49, 0x48, 0x48, 0x49, 0x48, 0x4b, 0x18, 0x48,
	0x48, 0x1c, 0x4b, 0x48, 0x4b, 0x48, 0x48, 0x4b,
	0x4a, 0x0c, 0x09, 0x48, 0x0e, 0x48, 0x48, 0x4b,
	0x0b, 0x48, 0x48, 0x4b, 0x48, 0x4b, 0x4b, 0x4a
};
 436
/* Fallback label when sprintf_dimm() cannot resolve a module name. */
static char *syndrome_unknown = "<Unknown>";
 438
 439static void spitfire_log_udb_syndrome(unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long bit)
 440{
 441        unsigned short scode;
 442        char memmod_str[64], *p;
 443
 444        if (udbl & bit) {
 445                scode = ecc_syndrome_table[udbl & 0xff];
 446                if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
 447                        p = syndrome_unknown;
 448                else
 449                        p = memmod_str;
 450                printk(KERN_WARNING "CPU[%d]: UDBL Syndrome[%x] "
 451                       "Memory Module \"%s\"\n",
 452                       smp_processor_id(), scode, p);
 453        }
 454
 455        if (udbh & bit) {
 456                scode = ecc_syndrome_table[udbh & 0xff];
 457                if (sprintf_dimm(scode, afar, memmod_str, sizeof(memmod_str)) < 0)
 458                        p = syndrome_unknown;
 459                else
 460                        p = memmod_str;
 461                printk(KERN_WARNING "CPU[%d]: UDBH Syndrome[%x] "
 462                       "Memory Module \"%s\"\n",
 463                       smp_processor_id(), scode, p);
 464        }
 465
 466}
 467
/* Log a correctable ECC error (CEE) and re-arm error reporting.
 * Correctable errors never kill the task; we just record them.
 */
static void spitfire_cee_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, int tl1, struct pt_regs *regs)
{
	printk(KERN_WARNING "CPU[%d]: Correctable ECC Error "
	       "AFSR[%lx] AFAR[%016lx] UDBL[%lx] UDBH[%lx] TL>1[%d]\n",
	       smp_processor_id(), afsr, afar, udbl, udbh, tl1);

	spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_CE);

	/* We always log it, even if someone is listening for this
	 * trap.
	 */
	notify_die(DIE_TRAP, "Correctable ECC Error", regs,
		   0, TRAP_TYPE_CEE, SIGTRAP);

	/* The Correctable ECC Error trap does not disable I/D caches.  So
	 * we only have to restore the ESTATE Error Enable register.
	 */
	spitfire_enable_estate_errors();
}
 488
 489static void spitfire_ue_log(unsigned long afsr, unsigned long afar, unsigned long udbh, unsigned long udbl, unsigned long tt, int tl1, struct pt_regs *regs)
 490{
 491        siginfo_t info;
 492
 493        printk(KERN_WARNING "CPU[%d]: Uncorrectable Error AFSR[%lx] "
 494               "AFAR[%lx] UDBL[%lx] UDBH[%ld] TT[%lx] TL>1[%d]\n",
 495               smp_processor_id(), afsr, afar, udbl, udbh, tt, tl1);
 496
 497        /* XXX add more human friendly logging of the error status
 498         * XXX as is implemented for cheetah
 499         */
 500
 501        spitfire_log_udb_syndrome(afar, udbh, udbl, UDBE_UE);
 502
 503        /* We always log it, even if someone is listening for this
 504         * trap.
 505         */
 506        notify_die(DIE_TRAP, "Uncorrectable Error", regs,
 507                   0, tt, SIGTRAP);
 508
 509        if (regs->tstate & TSTATE_PRIV) {
 510                if (tl1)
 511                        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
 512                die_if_kernel("UE", regs);
 513        }
 514
 515        /* XXX need more intelligent processing here, such as is implemented
 516         * XXX for cheetah errors, in fact if the E-cache still holds the
 517         * XXX line with bad parity this will loop
 518         */
 519
 520        spitfire_clean_and_reenable_l1_caches();
 521        spitfire_enable_estate_errors();
 522
 523        if (test_thread_flag(TIF_32BIT)) {
 524                regs->tpc &= 0xffffffff;
 525                regs->tnpc &= 0xffffffff;
 526        }
 527        info.si_signo = SIGBUS;
 528        info.si_errno = 0;
 529        info.si_code = BUS_OBJERR;
 530        info.si_addr = (void *)0;
 531        info.si_trapno = 0;
 532        force_sig_info(SIGBUS, &info, current);
 533}
 534
/* Top-level spitfire access-error handler.  status_encoded packs the
 * AFSR, trap type, TL>1 flag and both UDB error registers (see the
 * SFSTAT_* masks); this routine unpacks it and dispatches to the
 * UE/CEE loggers above.
 */
void spitfire_access_error(struct pt_regs *regs, unsigned long status_encoded, unsigned long afar)
{
	unsigned long afsr, tt, udbh, udbl;
	int tl1;

	afsr = (status_encoded & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;
	tt = (status_encoded & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	tl1 = (status_encoded & SFSTAT_TL_GT_ONE) ? 1 : 0;
	udbl = (status_encoded & SFSTAT_UDBL_MASK) >> SFSTAT_UDBL_SHIFT;
	udbh = (status_encoded & SFSTAT_UDBH_MASK) >> SFSTAT_UDBH_SHIFT;

#ifdef CONFIG_PCI
	/* A data access error during a PCI config-space poke is
	 * expected: note the fault and skip the faulting instruction.
	 */
	if (tt == TRAP_TYPE_DAE &&
	    pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		spitfire_clean_and_reenable_l1_caches();
		spitfire_enable_estate_errors();

		pci_poke_faulted = 1;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	if (afsr & SFAFSR_UE)
		spitfire_ue_log(afsr, afar, udbh, udbl, tt, tl1, regs);

	if (tt == TRAP_TYPE_CEE) {
		/* Handle the case where we took a CEE trap, but ACK'd
		 * only the UE state in the UDB error registers.
		 */
		if (afsr & SFAFSR_UE) {
			/* ACK the remaining CE state in each UDB half
			 * (offsets 0x0 and 0x18 of the UDB error ASI).
			 */
			if (udbh & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbh & UDBE_CE),
					  "r" (0x0), "i" (ASI_UDB_ERROR_W));
			}
			if (udbl & UDBE_CE) {
				__asm__ __volatile__(
					"stxa	%0, [%1] %2\n\t"
					"membar #Sync"
					: /* no outputs */
					: "r" (udbl & UDBE_CE),
					  "r" (0x18), "i" (ASI_UDB_ERROR_W));
			}
		}

		spitfire_cee_log(afsr, afar, udbh, udbl, tl1, regs);
	}
}
 587
/* Non-zero when the P-cache has been forced on (set outside this
 * chunk; read here only by name).
 */
int cheetah_pcache_forced_on;

/* Turn on the cheetah P-cache by setting the PE/HPE/SPE/SL bits in
 * the DCU control register (read-modify-write via ASI).
 */
void cheetah_enable_pcache(void)
{
	unsigned long dcr;

	printk("CHEETAH: Enabling P-Cache on cpu %d.\n",
	       smp_processor_id());

	__asm__ __volatile__("ldxa [%%g0] %1, %0"
			     : "=r" (dcr)
			     : "i" (ASI_DCU_CONTROL_REG));
	dcr |= (DCU_PE | DCU_HPE | DCU_SPE | DCU_SL);
	__asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "r" (dcr), "i" (ASI_DCU_CONTROL_REG));
}
 606
/* Cheetah error trap handling. */
/* Parameters of the region used to flush the E-cache; initialized
 * elsewhere (not visible in this chunk).
 */
static unsigned long ecache_flush_physbase;
static unsigned long ecache_flush_linesize;
static unsigned long ecache_flush_size;
 611
 612/* This table is ordered in priority of errors and matches the
 613 * AFAR overwrite policy as well.
 614 */
 615
struct afsr_error_table {
	unsigned long mask;	/* AFSR error bit(s) this entry matches */
	const char *name;	/* human-readable description */
};
 620
/* Descriptions for the base cheetah AFSR error bits. */
static const char CHAFSR_PERR_msg[] =
	"System interface protocol error";
static const char CHAFSR_IERR_msg[] =
	"Internal processor error";
static const char CHAFSR_ISAP_msg[] =
	"System request parity error on incoming address";
static const char CHAFSR_UCU_msg[] =
	"Uncorrectable E-cache ECC error for ifetch/data";
static const char CHAFSR_UCC_msg[] =
	"SW Correctable E-cache ECC error for ifetch/data";
static const char CHAFSR_UE_msg[] =
	"Uncorrectable system bus data ECC error for read";
static const char CHAFSR_EDU_msg[] =
	"Uncorrectable E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMU_msg[] =
	"Uncorrectable system bus MTAG error";
static const char CHAFSR_WDU_msg[] =
	"Uncorrectable E-cache ECC error for writeback";
static const char CHAFSR_CPU_msg[] =
	"Uncorrectable ECC error for copyout";
static const char CHAFSR_CE_msg[] =
	"HW corrected system bus data ECC error for read";
static const char CHAFSR_EDC_msg[] =
	"HW corrected E-cache ECC error for stmerge/blkld";
static const char CHAFSR_EMC_msg[] =
	"HW corrected system bus MTAG ECC error";
static const char CHAFSR_WDC_msg[] =
	"HW corrected E-cache ECC error for writeback";
static const char CHAFSR_CPC_msg[] =
	"HW corrected ECC error for copyout";
static const char CHAFSR_TO_msg[] =
	"Unmapped error from system bus";
static const char CHAFSR_BERR_msg[] =
	"Bus error response from system bus";
static const char CHAFSR_IVC_msg[] =
	"HW corrected system bus data ECC error for ivec read";
static const char CHAFSR_IVU_msg[] =
	"Uncorrectable system bus data ECC error for ivec read";
/* Base cheetah table, ordered by error priority / AFAR overwrite
 * policy (see comment above struct afsr_error_table).
 */
static struct afsr_error_table __cheetah_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional error bits defined by cheetah-plus. */
static const char CHPAFSR_DTO_msg[] =
	"System bus unmapped error for prefetch/storequeue-read";
static const char CHPAFSR_DBERR_msg[] =
	"System bus error for prefetch/storequeue-read";
static const char CHPAFSR_THCE_msg[] =
	"Hardware corrected E-cache Tag ECC error";
static const char CHPAFSR_TSCE_msg[] =
	"SW handled correctable E-cache Tag ECC error";
static const char CHPAFSR_TUE_msg[] =
	"Uncorrectable E-cache Tag ECC error";
static const char CHPAFSR_DUE_msg[] =
	"System bus uncorrectable data ECC error due to prefetch/store-fill";
/* Cheetah-plus table: base cheetah entries plus the CHPAFSR_* bits. */
static struct afsr_error_table __cheetah_plus_error_table[] = {
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	CHAFSR_EMU,	CHAFSR_EMU_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	CHAFSR_EMC,	CHAFSR_EMC_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	CHPAFSR_DTO,	CHPAFSR_DTO_msg		},
	{	CHPAFSR_DBERR,	CHPAFSR_DBERR_msg	},
	{	CHPAFSR_THCE,	CHPAFSR_THCE_msg	},
	{	CHPAFSR_TSCE,	CHPAFSR_TSCE_msg	},
	{	CHPAFSR_TUE,	CHPAFSR_TUE_msg		},
	{	CHPAFSR_DUE,	CHPAFSR_DUE_msg		},
	/* These two do not update the AFAR. */
	{	CHAFSR_IVC,	CHAFSR_IVC_msg		},
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Additional error bits defined by jalapeno (JBUS-based chips). */
static const char JPAFSR_JETO_msg[] =
	"System interface protocol error, hw timeout caused";
static const char JPAFSR_SCE_msg[] =
	"Parity error on system snoop results";
static const char JPAFSR_JEIC_msg[] =
	"System interface protocol error, illegal command detected";
static const char JPAFSR_JEIT_msg[] =
	"System interface protocol error, illegal ADTYPE detected";
static const char JPAFSR_OM_msg[] =
	"Out of range memory error has occurred";
static const char JPAFSR_ETP_msg[] =
	"Parity error on L2 cache tag SRAM";
static const char JPAFSR_UMS_msg[] =
	"Error due to unsupported store";
static const char JPAFSR_RUE_msg[] =
	"Uncorrectable ECC error from remote cache/memory";
static const char JPAFSR_RCE_msg[] =
	"Correctable ECC error from remote cache/memory";
static const char JPAFSR_BP_msg[] =
	"JBUS parity error on returned read data";
static const char JPAFSR_WBP_msg[] =
	"JBUS parity error on data for writeback or block store";
static const char JPAFSR_FRC_msg[] =
	"Foreign read to DRAM incurring correctable ECC error";
static const char JPAFSR_FRU_msg[] =
	"Foreign read to DRAM incurring uncorrectable ECC error";
/* Jalapeno table: mix of JPAFSR_* and base CHAFSR_* entries. */
static struct afsr_error_table __jalapeno_error_table[] = {
	{	JPAFSR_JETO,	JPAFSR_JETO_msg		},
	{	JPAFSR_SCE,	JPAFSR_SCE_msg		},
	{	JPAFSR_JEIC,	JPAFSR_JEIC_msg		},
	{	JPAFSR_JEIT,	JPAFSR_JEIT_msg		},
	{	CHAFSR_PERR,	CHAFSR_PERR_msg		},
	{	CHAFSR_IERR,	CHAFSR_IERR_msg		},
	{	CHAFSR_ISAP,	CHAFSR_ISAP_msg		},
	{	CHAFSR_UCU,	CHAFSR_UCU_msg		},
	{	CHAFSR_UCC,	CHAFSR_UCC_msg		},
	{	CHAFSR_UE,	CHAFSR_UE_msg		},
	{	CHAFSR_EDU,	CHAFSR_EDU_msg		},
	{	JPAFSR_OM,	JPAFSR_OM_msg		},
	{	CHAFSR_WDU,	CHAFSR_WDU_msg		},
	{	CHAFSR_CPU,	CHAFSR_CPU_msg		},
	{	CHAFSR_CE,	CHAFSR_CE_msg		},
	{	CHAFSR_EDC,	CHAFSR_EDC_msg		},
	{	JPAFSR_ETP,	JPAFSR_ETP_msg		},
	{	CHAFSR_WDC,	CHAFSR_WDC_msg		},
	{	CHAFSR_CPC,	CHAFSR_CPC_msg		},
	{	CHAFSR_TO,	CHAFSR_TO_msg		},
	{	CHAFSR_BERR,	CHAFSR_BERR_msg		},
	{	JPAFSR_UMS,	JPAFSR_UMS_msg		},
	{	JPAFSR_RUE,	JPAFSR_RUE_msg		},
	{	JPAFSR_RCE,	JPAFSR_RCE_msg		},
	{	JPAFSR_BP,	JPAFSR_BP_msg		},
	{	JPAFSR_WBP,	JPAFSR_WBP_msg		},
	{	JPAFSR_FRC,	JPAFSR_FRC_msg		},
	{	JPAFSR_FRU,	JPAFSR_FRU_msg		},
	/* This one does not update the AFAR (jalapeno has no IVC entry,
	 * unlike the two tables above).
	 */
	{	CHAFSR_IVU,	CHAFSR_IVU_msg		},
	{	0,		NULL			},
};
/* Chip-specific AFSR decode table and the mask of all AFSR error
 * bits it covers; both are selected at boot in
 * cheetah_ecache_flush_init() from the cpu version register.
 */
static struct afsr_error_table *cheetah_error_table;
static unsigned long cheetah_afsr_errors;

/* Error trap logging scoreboard: two cheetah_err_info slots per cpu
 * (one for TL0 traps, one for TL1), allocated at boot in
 * cheetah_ecache_flush_init().
 */
struct cheetah_err_info *cheetah_error_log;
 786
 787static inline struct cheetah_err_info *cheetah_get_error_log(unsigned long afsr)
 788{
 789        struct cheetah_err_info *p;
 790        int cpu = smp_processor_id();
 791
 792        if (!cheetah_error_log)
 793                return NULL;
 794
 795        p = cheetah_error_log + (cpu * 2);
 796        if ((afsr & CHAFSR_TL1) != 0UL)
 797                p++;
 798
 799        return p;
 800}
 801
 802extern unsigned int tl0_icpe[], tl1_icpe[];
 803extern unsigned int tl0_dcpe[], tl1_dcpe[];
 804extern unsigned int tl0_fecc[], tl1_fecc[];
 805extern unsigned int tl0_cee[], tl1_cee[];
 806extern unsigned int tl0_iae[], tl1_iae[];
 807extern unsigned int tl0_dae[], tl1_dae[];
 808extern unsigned int cheetah_plus_icpe_trap_vector[], cheetah_plus_icpe_trap_vector_tl1[];
 809extern unsigned int cheetah_plus_dcpe_trap_vector[], cheetah_plus_dcpe_trap_vector_tl1[];
 810extern unsigned int cheetah_fecc_trap_vector[], cheetah_fecc_trap_vector_tl1[];
 811extern unsigned int cheetah_cee_trap_vector[], cheetah_cee_trap_vector_tl1[];
 812extern unsigned int cheetah_deferred_trap_vector[], cheetah_deferred_trap_vector_tl1[];
 813
/* Boot-time setup for Cheetah error handling: size the E-cache
 * displacement-flush area, allocate the per-cpu error logging
 * scoreboard, select the chip-specific AFSR decode table, and patch
 * the trap table entries with the Cheetah-specific vectors.
 */
void __init cheetah_ecache_flush_init(void)
{
        unsigned long largest_size, smallest_linesize, order, ver;
        int i, sz;

        /* Scan all cpu device tree nodes, note two values:
         * 1) largest E-cache size
         * 2) smallest E-cache line size
         */
        largest_size = 0UL;
        smallest_linesize = ~0UL;

        for (i = 0; i < NR_CPUS; i++) {
                unsigned long val;

                val = cpu_data(i).ecache_size;
                /* A zero size means no data was recorded for this
                 * cpu slot, so skip it.
                 */
                if (!val)
                        continue;

                if (val > largest_size)
                        largest_size = val;

                val = cpu_data(i).ecache_line_size;
                if (val < smallest_linesize)
                        smallest_linesize = val;

        }

        if (largest_size == 0UL || smallest_linesize == ~0UL) {
                prom_printf("cheetah_ecache_flush_init: Cannot probe cpu E-cache "
                            "parameters.\n");
                prom_halt();
        }

        /* The displacement flush reads twice the E-cache size (see
         * cheetah_flush_ecache()), so reserve a span that big.
         */
        ecache_flush_size = (2 * largest_size);
        ecache_flush_linesize = smallest_linesize;

        ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);

        if (ecache_flush_physbase == ~0UL) {
                prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte "
                            "contiguous physical memory.\n",
                            ecache_flush_size);
                prom_halt();
        }

        /* Now allocate error trap reporting scoreboard. */
        sz = NR_CPUS * (2 * sizeof(struct cheetah_err_info));
        for (order = 0; order < MAX_ORDER; order++) {
                if ((PAGE_SIZE << order) >= sz)
                        break;
        }
        cheetah_error_log = (struct cheetah_err_info *)
                __get_free_pages(GFP_KERNEL, order);
        if (!cheetah_error_log) {
                prom_printf("cheetah_ecache_flush_init: Failed to allocate "
                            "error logging scoreboard (%d bytes).\n", sz);
                prom_halt();
        }
        memset(cheetah_error_log, 0, PAGE_SIZE << order);

        /* Mark all AFSRs as invalid so that the trap handler will
         * log new information there.
         */
        for (i = 0; i < 2 * NR_CPUS; i++)
                cheetah_error_log[i].afsr = CHAFSR_INVALID;

        /* Pick the decode table matching this chip's implementation
         * id, taken from the upper half of the %ver register.
         */
        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
        if ((ver >> 32) == __JALAPENO_ID ||
            (ver >> 32) == __SERRANO_ID) {
                cheetah_error_table = &__jalapeno_error_table[0];
                cheetah_afsr_errors = JPAFSR_ERRORS;
        } else if ((ver >> 32) == 0x003e0015) {
                /* NOTE(review): 0x003e0015 is presumably the
                 * Cheetah+ implementation id -- confirm against
                 * asm/head.h.
                 */
                cheetah_error_table = &__cheetah_plus_error_table[0];
                cheetah_afsr_errors = CHPAFSR_ERRORS;
        } else {
                cheetah_error_table = &__cheetah_error_table[0];
                cheetah_afsr_errors = CHAFSR_ERRORS;
        }

        /* Now patch trap tables.  Each trap table entry holds eight
         * 4-byte instructions, hence the (8 * 4) byte copies.
         */
        memcpy(tl0_fecc, cheetah_fecc_trap_vector, (8 * 4));
        memcpy(tl1_fecc, cheetah_fecc_trap_vector_tl1, (8 * 4));
        memcpy(tl0_cee, cheetah_cee_trap_vector, (8 * 4));
        memcpy(tl1_cee, cheetah_cee_trap_vector_tl1, (8 * 4));
        memcpy(tl0_iae, cheetah_deferred_trap_vector, (8 * 4));
        memcpy(tl1_iae, cheetah_deferred_trap_vector_tl1, (8 * 4));
        memcpy(tl0_dae, cheetah_deferred_trap_vector, (8 * 4));
        memcpy(tl1_dae, cheetah_deferred_trap_vector_tl1, (8 * 4));
        if (tlb_type == cheetah_plus) {
                memcpy(tl0_dcpe, cheetah_plus_dcpe_trap_vector, (8 * 4));
                memcpy(tl1_dcpe, cheetah_plus_dcpe_trap_vector_tl1, (8 * 4));
                memcpy(tl0_icpe, cheetah_plus_icpe_trap_vector, (8 * 4));
                memcpy(tl1_icpe, cheetah_plus_icpe_trap_vector_tl1, (8 * 4));
        }
        /* Make the patched trap table visible to instruction fetch. */
        flushi(PAGE_OFFSET);
}
 911
/* Displacement-flush the entire E-cache: read ecache_flush_size
 * (2 x the largest E-cache size) bytes of the reserved physical
 * span through ASI_PHYS_USE_EC, one line at a time, walking from
 * the top of the span down.  The ldxa sits in the branch delay
 * slot of the loop.
 */
static void cheetah_flush_ecache(void)
{
        unsigned long flush_base = ecache_flush_physbase;
        unsigned long flush_linesize = ecache_flush_linesize;
        unsigned long flush_size = ecache_flush_size;

        __asm__ __volatile__("1: subcc  %0, %4, %0\n\t"
                             "   bne,pt %%xcc, 1b\n\t"
                             "    ldxa  [%2 + %0] %3, %%g0\n\t"
                             : "=&r" (flush_size)
                             : "0" (flush_size), "r" (flush_base),
                               "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
}
 925
/* Displacement-flush a single E-cache line: read the two addresses
 * in the flush span (one in each half) that presumably map to the
 * same E-cache index as PHYSADDR, forcing that line out.
 */
static void cheetah_flush_ecache_line(unsigned long physaddr)
{
        unsigned long alias;

        /* 8-byte align for the ldxa accesses below. */
        physaddr &= ~(8UL - 1UL);
        physaddr = (ecache_flush_physbase +
                    (physaddr & ((ecache_flush_size>>1UL) - 1UL)));
        alias = physaddr + (ecache_flush_size >> 1UL);
        __asm__ __volatile__("ldxa [%0] %2, %%g0\n\t"
                             "ldxa [%1] %2, %%g0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "r" (physaddr), "r" (alias),
                               "i" (ASI_PHYS_USE_EC));
}
 941
 942/* Unfortunately, the diagnostic access to the I-cache tags we need to
 943 * use to clear the thing interferes with I-cache coherency transactions.
 944 *
 945 * So we must only flush the I-cache when it is disabled.
 946 */
/* Invalidate the local I-cache by clearing every tag through the
 * ASI_IC_TAG diagnostic ASI.  Per the comment above, the caller
 * must have disabled the I-cache first.
 */
static void __cheetah_flush_icache(void)
{
        unsigned int icache_size, icache_line_size;
        unsigned long addr;

        icache_size = local_cpu_data().icache_size;
        icache_line_size = local_cpu_data().icache_line_size;

        /* Clear the valid bits in all the tags. */
        for (addr = 0; addr < icache_size; addr += icache_line_size) {
                /* NOTE(review): the (2 << 3) in the diagnostic
                 * address presumably selects the valid-bit tag
                 * field -- confirm against the UltraSPARC-III
                 * manual.
                 */
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (addr | (2 << 3)),
                                       "i" (ASI_IC_TAG));
        }
}
 964
 965static void cheetah_flush_icache(void)
 966{
 967        unsigned long dcu_save;
 968
 969        /* Save current DCU, disable I-cache. */
 970        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
 971                             "or %0, %2, %%g1\n\t"
 972                             "stxa %%g1, [%%g0] %1\n\t"
 973                             "membar #Sync"
 974                             : "=r" (dcu_save)
 975                             : "i" (ASI_DCU_CONTROL_REG), "i" (DCU_IC)
 976                             : "g1");
 977
 978        __cheetah_flush_icache();
 979
 980        /* Restore DCU register */
 981        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
 982                             "membar #Sync"
 983                             : /* no outputs */
 984                             : "r" (dcu_save), "i" (ASI_DCU_CONTROL_REG));
 985}
 986
/* Invalidate the entire local D-cache by writing zero to every tag
 * through the ASI_DCACHE_TAG diagnostic ASI.
 */
static void cheetah_flush_dcache(void)
{
        unsigned int dcache_size, dcache_line_size;
        unsigned long addr;

        dcache_size = local_cpu_data().dcache_size;
        dcache_line_size = local_cpu_data().dcache_line_size;

        for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
                __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
                                     "membar #Sync"
                                     : /* no outputs */
                                     : "r" (addr), "i" (ASI_DCACHE_TAG));
        }
}
1002
1003/* In order to make the even parity correct we must do two things.
1004 * First, we clear DC_data_parity and set DC_utag to an appropriate value.
1005 * Next, we clear out all 32-bytes of data for that line.  Data of
1006 * all-zero + tag parity value of zero == correct parity.
1007 */
static void cheetah_plus_zap_dcache_parity(void)
{
        unsigned int dcache_size, dcache_line_size;
        unsigned long addr;

        dcache_size = local_cpu_data().dcache_size;
        dcache_line_size = local_cpu_data().dcache_line_size;

        for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
                /* NOTE(review): utag value is derived from the cache
                 * index (addr >> 14); presumably any consistent
                 * value yields correct parity once the data below is
                 * zeroed -- confirm against the cpu manual.
                 */
                unsigned long tag = (addr >> 14);
                unsigned long line;

                /* Set the micro-tag for this line (clears
                 * DC_data_parity per the comment above).
                 */
                __asm__ __volatile__("membar    #Sync\n\t"
                                     "stxa      %0, [%1] %2\n\t"
                                     "membar    #Sync"
                                     : /* no outputs */
                                     : "r" (tag), "r" (addr),
                                       "i" (ASI_DCACHE_UTAG));
                /* Zero every 8-byte word of the line's data so that
                 * all-zero data + zero tag parity == even parity.
                 */
                for (line = addr; line < addr + dcache_line_size; line += 8)
                        __asm__ __volatile__("membar    #Sync\n\t"
                                             "stxa      %%g0, [%0] %1\n\t"
                                             "membar    #Sync"
                                             : /* no outputs */
                                             : "r" (line),
                                               "i" (ASI_DCACHE_DATA));
        }
}
1035
1036/* Conversion tables used to frob Cheetah AFSR syndrome values into
1037 * something palatable to the memory controller driver get_unumber
1038 * routine.
1039 */
/* Special (non data-bit) syndrome decode codes.  Data bits are
 * 0-127; the names suggest: Cn = ECC check bit n, MTn = mtag data
 * bit, MTCn = mtag check bit, M/M2/M3/M4 = multi-bit errors, and
 * NONE = no error.  TODO(review): confirm the exact meanings
 * against the memory controller get_unumber() consumer.
 */
#define MT0     137
#define MT1     138
#define MT2     139
#define NONE    254
#define MTC0    140
#define MTC1    141
#define MTC2    142
#define MTC3    143
#define C0      128
#define C1      129
#define C2      130
#define C3      131
#define C4      132
#define C5      133
#define C6      134
#define C7      135
#define C8      136
#define M2      144
#define M3      145
#define M4      146
#define M       147
/* Map the 9-bit AFSR E_SYND E-cache ECC syndrome (512 entries, rows
 * labelled by the high syndrome bits) to either the faulty data bit
 * number (0-127) or one of the special marker codes defined above.
 * Indexed in cheetah_log_errors().
 */
static unsigned char cheetah_ecc_syntab[] = {
/*00*/NONE, C0, C1, M2, C2, M2, M3, 47, C3, M2, M2, 53, M2, 41, 29, M,
/*01*/C4, M, M, 50, M2, 38, 25, M2, M2, 33, 24, M2, 11, M, M2, 16,
/*02*/C5, M, M, 46, M2, 37, 19, M2, M, 31, 32, M, 7, M2, M2, 10,
/*03*/M2, 40, 13, M2, 59, M, M2, 66, M, M2, M2, 0, M2, 67, 71, M,
/*04*/C6, M, M, 43, M, 36, 18, M, M2, 49, 15, M, 63, M2, M2, 6,
/*05*/M2, 44, 28, M2, M, M2, M2, 52, 68, M2, M2, 62, M2, M3, M3, M4,
/*06*/M2, 26, 106, M2, 64, M, M2, 2, 120, M, M2, M3, M, M3, M3, M4,
/*07*/116, M2, M2, M3, M2, M3, M, M4, M2, 58, 54, M2, M, M4, M4, M3,
/*08*/C7, M2, M, 42, M, 35, 17, M2, M, 45, 14, M2, 21, M2, M2, 5,
/*09*/M, 27, M, M, 99, M, M, 3, 114, M2, M2, 20, M2, M3, M3, M,
/*0a*/M2, 23, 113, M2, 112, M2, M, 51, 95, M, M2, M3, M2, M3, M3, M2,
/*0b*/103, M, M2, M3, M2, M3, M3, M4, M2, 48, M, M, 73, M2, M, M3,
/*0c*/M2, 22, 110, M2, 109, M2, M, 9, 108, M2, M, M3, M2, M3, M3, M,
/*0d*/102, M2, M, M, M2, M3, M3, M, M2, M3, M3, M2, M, M4, M, M3,
/*0e*/98, M, M2, M3, M2, M, M3, M4, M2, M3, M3, M4, M3, M, M, M,
/*0f*/M2, M3, M3, M, M3, M, M, M, 56, M4, M, M3, M4, M, M, M,
/*10*/C8, M, M2, 39, M, 34, 105, M2, M, 30, 104, M, 101, M, M, 4,
/*11*/M, M, 100, M, 83, M, M2, 12, 87, M, M, 57, M2, M, M3, M,
/*12*/M2, 97, 82, M2, 78, M2, M2, 1, 96, M, M, M, M, M, M3, M2,
/*13*/94, M, M2, M3, M2, M, M3, M, M2, M, 79, M, 69, M, M4, M,
/*14*/M2, 93, 92, M, 91, M, M2, 8, 90, M2, M2, M, M, M, M, M4,
/*15*/89, M, M, M3, M2, M3, M3, M, M, M, M3, M2, M3, M2, M, M3,
/*16*/86, M, M2, M3, M2, M, M3, M, M2, M, M3, M, M3, M, M, M3,
/*17*/M, M, M3, M2, M3, M2, M4, M, 60, M, M2, M3, M4, M, M, M2,
/*18*/M2, 88, 85, M2, 84, M, M2, 55, 81, M2, M2, M3, M2, M3, M3, M4,
/*19*/77, M, M, M, M2, M3, M, M, M2, M3, M3, M4, M3, M2, M, M,
/*1a*/74, M, M2, M3, M, M, M3, M, M, M, M3, M, M3, M, M4, M3,
/*1b*/M2, 70, 107, M4, 65, M2, M2, M, 127, M, M, M, M2, M3, M3, M,
/*1c*/80, M2, M2, 72, M, 119, 118, M, M2, 126, 76, M, 125, M, M4, M3,
/*1d*/M2, 115, 124, M, 75, M, M, M3, 61, M, M4, M, M4, M, M, M,
/*1e*/M, 123, 122, M4, 121, M4, M, M3, 117, M2, M2, M3, M4, M3, M, M,
/*1f*/111, M, M, M, M4, M3, M3, M, M, M, M3, M, M3, M2, M, M
};
/* Map the 4-bit AFSR M_SYND mtag syndrome to a marker code defined
 * above (MTCn/MTn) or NONE.  Indexed in cheetah_log_errors().
 */
static unsigned char cheetah_mtag_syntab[] = {
       NONE, MTC0,
       MTC1, NONE,
       MTC2, NONE,
       NONE, MT0,
       MTC3, NONE,
       NONE, MT1,
       NONE, MT2,
       NONE, NONE
};
1105
1106/* Return the highest priority error conditon mentioned. */
1107static inline unsigned long cheetah_get_hipri(unsigned long afsr)
1108{
1109        unsigned long tmp = 0;
1110        int i;
1111
1112        for (i = 0; cheetah_error_table[i].mask; i++) {
1113                if ((tmp = (afsr & cheetah_error_table[i].mask)) != 0UL)
1114                        return tmp;
1115        }
1116        return tmp;
1117}
1118
1119static const char *cheetah_get_string(unsigned long bit)
1120{
1121        int i;
1122
1123        for (i = 0; cheetah_error_table[i].mask; i++) {
1124                if ((bit & cheetah_error_table[i].mask) != 0UL)
1125                        return cheetah_error_table[i].name;
1126        }
1127        return "???";
1128}
1129
/* Print a complete Cheetah error report: raw AFSR/AFAR and trap
 * state, the decoded highest-priority error, a syndrome -> DIMM
 * translation when relevant, the trap-time cache snapshots from
 * INFO, and any remaining secondary error bits.  RECOVERABLE only
 * selects the printk severity (warning vs critical).
 */
static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *info,
                               unsigned long afsr, unsigned long afar, int recoverable)
{
        unsigned long hipri;
        char unum[256];

        printk("%s" "ERROR(%d): Cheetah error trap taken afsr[%016lx] afar[%016lx] TL1(%d)\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               afsr, afar,
               (afsr & CHAFSR_TL1) ? 1 : 0);
        printk("%s" "ERROR(%d): TPC[%lx] TNPC[%lx] O7[%lx] TSTATE[%lx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
        printk("%s" "ERROR(%d): ",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
        /* %pS resolves the trap PC to a kernel symbol name. */
        printk("TPC<%pS>\n", (void *) regs->tpc);
        printk("%s" "ERROR(%d): M_SYND(%lx),  E_SYND(%lx)%s%s\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
               (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT,
               (afsr & CHAFSR_ME) ? ", Multiple Errors" : "",
               (afsr & CHAFSR_PRIV) ? ", Privileged" : "");
        hipri = cheetah_get_hipri(afsr);
        printk("%s" "ERROR(%d): Highest priority error (%016lx) \"%s\"\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               hipri, cheetah_get_string(hipri));

        /* Try to get unumber if relevant. */
#define ESYND_ERRORS    (CHAFSR_IVC | CHAFSR_IVU | \
                         CHAFSR_CPC | CHAFSR_CPU | \
                         CHAFSR_UE  | CHAFSR_CE  | \
                         CHAFSR_EDC | CHAFSR_EDU  | \
                         CHAFSR_UCC | CHAFSR_UCU  | \
                         CHAFSR_WDU | CHAFSR_WDC)
#define MSYND_ERRORS    (CHAFSR_EMC | CHAFSR_EMU)
        if (afsr & ESYND_ERRORS) {
                int syndrome;
                int ret;

                /* ECC syndrome -> data/check bit code -> DIMM label
                 * via the memory controller driver.
                 */
                syndrome = (afsr & CHAFSR_E_SYNDROME) >> CHAFSR_E_SYNDROME_SHIFT;
                syndrome = cheetah_ecc_syntab[syndrome];
                ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR E-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        } else if (afsr & MSYND_ERRORS) {
                int syndrome;
                int ret;

                /* Likewise for the mtag syndrome. */
                syndrome = (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT;
                syndrome = cheetah_mtag_syntab[syndrome];
                ret = sprintf_dimm(syndrome, afar, unum, sizeof(unum));
                if (ret != -1)
                        printk("%s" "ERROR(%d): AFAR M-syndrome [%s]\n",
                               (recoverable ? KERN_WARNING : KERN_CRIT),
                               smp_processor_id(), unum);
        }

        /* Now dump the cache snapshots. */
        printk("%s" "ERROR(%d): D-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->dcache_index,
               info->dcache_tag,
               info->dcache_utag,
               info->dcache_stag);
        printk("%s" "ERROR(%d): D-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->dcache_data[0],
               info->dcache_data[1],
               info->dcache_data[2],
               info->dcache_data[3]);
        printk("%s" "ERROR(%d): I-cache idx[%x] tag[%016llx] utag[%016llx] stag[%016llx] "
               "u[%016llx] l[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->icache_index,
               info->icache_tag,
               info->icache_utag,
               info->icache_stag,
               info->icache_upper,
               info->icache_lower);
        printk("%s" "ERROR(%d): I-cache INSN0[%016llx] INSN1[%016llx] INSN2[%016llx] INSN3[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[0],
               info->icache_data[1],
               info->icache_data[2],
               info->icache_data[3]);
        printk("%s" "ERROR(%d): I-cache INSN4[%016llx] INSN5[%016llx] INSN6[%016llx] INSN7[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->icache_data[4],
               info->icache_data[5],
               info->icache_data[6],
               info->icache_data[7]);
        printk("%s" "ERROR(%d): E-cache idx[%x] tag[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               (int) info->ecache_index, info->ecache_tag);
        printk("%s" "ERROR(%d): E-cache data0[%016llx] data1[%016llx] data2[%016llx] data3[%016llx]\n",
               (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
               info->ecache_data[0],
               info->ecache_data[1],
               info->ecache_data[2],
               info->ecache_data[3]);

        /* Report any remaining (secondary) error bits beyond the
         * highest-priority one already printed above.
         */
        afsr = (afsr & ~hipri) & cheetah_afsr_errors;
        while (afsr != 0UL) {
                unsigned long bit = cheetah_get_hipri(afsr);

                printk("%s" "ERROR: Multiple-error (%016lx) \"%s\"\n",
                       (recoverable ? KERN_WARNING : KERN_CRIT),
                       bit, cheetah_get_string(bit));

                afsr &= ~bit;
        }

        if (!recoverable)
                printk(KERN_CRIT "ERROR: This condition is not recoverable.\n");
}
1247
/* Re-read the AFSR directly and clear any error bits found there.
 * If new error bits are present and LOGP is non-NULL, record the
 * raw AFSR/AFAR values into LOGP.  Returns non-zero when new error
 * bits were seen.
 */
static int cheetah_recheck_errors(struct cheetah_err_info *logp)
{
        unsigned long afsr, afar;
        int ret = 0;

        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                             : "=r" (afsr)
                             : "i" (ASI_AFSR));
        if ((afsr & cheetah_afsr_errors) != 0) {
                if (logp != NULL) {
                        __asm__ __volatile__("ldxa [%%g0] %1, %0\n\t"
                                             : "=r" (afar)
                                             : "i" (ASI_AFAR));
                        logp->afsr = afsr;
                        logp->afar = afar;
                }
                ret = 1;
        }
        /* Writing the observed bits back to the AFSR clears them. */
        __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
                             "membar #Sync\n\t"
                             : : "r" (afsr), "i" (ASI_AFSR));

        return ret;
}
1272
/* Handler for Cheetah Fast-ECC (E-cache) error traps.  On entry the
 * trap vector code has disabled the I/D caches and error reporting
 * traps and logged the error state; this routine flushes the
 * E-cache, logs the error, re-enables the caches and error traps,
 * and panics when the error is not recoverable.
 */
void cheetah_fecc_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
        struct cheetah_err_info local_snapshot, *p;
        int recoverable;

        /* Flush E-cache */
        cheetah_flush_ecache();

        p = cheetah_get_error_log(afsr);
        if (!p) {
                /* Error hit before the logging scoreboard exists;
                 * all we can do is report via the PROM and halt.
                 */
                prom_printf("ERROR: Early Fast-ECC error afsr[%016lx] afar[%016lx]\n",
                            afsr, afar);
                prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
                            smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
                prom_halt();
        }

        /* Grab snapshot of logged error. */
        memcpy(&local_snapshot, p, sizeof(local_snapshot));

        /* If the current trap snapshot does not match what the
         * trap handler passed along into our args, big trouble.
         * In such a case, mark the local copy as invalid.
         *
         * Else, it matches and we mark the afsr in the non-local
         * copy as invalid so we may log new error traps there.
         */
        if (p->afsr != afsr || p->afar != afar)
                local_snapshot.afsr = CHAFSR_INVALID;
        else
                p->afsr = CHAFSR_INVALID;

        cheetah_flush_icache();
        cheetah_flush_dcache();

        /* Re-enable I-cache/D-cache */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_DCU_CONTROL_REG),
                               "i" (DCU_DC | DCU_IC)
                             : "g1");

        /* Re-enable error reporting */
        __asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
                             "or %%g1, %1, %%g1\n\t"
                             "stxa %%g1, [%%g0] %0\n\t"
                             "membar #Sync"
                             : /* no outputs */
                             : "i" (ASI_ESTATE_ERROR_EN),
                               "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
                             : "g1");

        /* Decide if we can continue after handling this trap and
         * logging the error.
         */
        recoverable = 1;
        if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
                recoverable = 0;

        /* Re-check AFSR/AFAR.  What we are looking for here is whether a new
         * error was logged while we had error reporting traps disabled.
         */
        if (cheetah_recheck_errors(&local_snapshot)) {
                unsigned long new_afsr = local_snapshot.afsr;

                /* If we got a new asynchronous error, die... */
                if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
                                CHAFSR_WDU | CHAFSR_CPU |
                                CHAFSR_IVU | CHAFSR_UE |
                                CHAFSR_BERR | CHAFSR_TO))
                        recoverable = 0;
        }

        /* Log errors. */
        cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

        if (!recoverable)
                panic("Irrecoverable Fast-ECC error trap.\n");

        /* Flush E-cache to kick the error trap handlers out. */
        cheetah_flush_ecache();
}
1358
1359/* Try to fix a correctable error by pushing the line out from
1360 * the E-cache.  Recheck error reporting registers to see if the
1361 * problem is intermittent.
1362 */
/* Returns 0 if no new error was triggered (intermittent problem),
 * 1 if the error recurred once but cleared on retry, or 2 if it
 * persists even after the retry.  Runs with correctable-error traps
 * temporarily disabled; restores the previous enable state on exit.
 */
static int cheetah_fix_ce(unsigned long physaddr)
{
        unsigned long orig_estate;
        unsigned long alias1, alias2;
        int ret;

        /* Make sure correctable error traps are disabled. */
        __asm__ __volatile__("ldxa      [%%g0] %2, %0\n\t"
                             "andn      %0, %1, %%g1\n\t"
                             "stxa      %%g1, [%%g0] %2\n\t"
                             "membar    #Sync"
                             : "=&r" (orig_estate)
                             : "i" (ESTATE_ERROR_CEEN),
                               "i" (ASI_ESTATE_ERROR_EN)
                             : "g1");

        /* We calculate alias addresses that will force the
         * cache line in question out of the E-cache.  Then
         * we bring it back in with an atomic instruction so
         * that we get it in some modified/exclusive state,
         * then we displace it again to try and get proper ECC
         * pushed back into the system.
         */
        physaddr &= ~(8UL - 1UL);
        alias1 = (ecache_flush_physbase +
                  (physaddr & ((ecache_flush_size >> 1) - 1)));
        alias2 = alias1 + (ecache_flush_size >> 1);
        __asm__ __volatile__("ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "casxa     [%2] %3, %%g0, %%g0\n\t"
                             "ldxa      [%0] %3, %%g0\n\t"
                             "ldxa      [%1] %3, %%g0\n\t"
                             "membar    #Sync"
                             : /* no outputs */
                             : "r" (alias1), "r" (alias2),
                               "r" (physaddr), "i" (ASI_PHYS_USE_EC));

        /* Did that trigger another error? */
        if (cheetah_recheck_errors(NULL)) {
                /* Try one more time. */
                __asm__ __volatile__("ldxa [%0] %1, %%g0\n\t"
                                     "membar #Sync"
                                     : : "r" (physaddr), "i" (ASI_PHYS_USE_EC));
                if (cheetah_recheck_errors(NULL))
                        ret = 2;
                else
                        ret = 1;
        } else {
                /* No new error, intermittent problem. */
                ret = 0;
        }

        /* Restore error enables. */
        __asm__ __volatile__("stxa      %0, [%%g0] %1\n\t"
                             "membar    #Sync"
                             : : "r" (orig_estate), "i" (ASI_ESTATE_ERROR_EN));

        return ret;
}
1422
1423/* Return non-zero if PADDR is a valid physical memory address. */
1424static int cheetah_check_main_memory(unsigned long paddr)
1425{
1426        unsigned long vaddr = PAGE_OFFSET + paddr;
1427
1428        if (vaddr > (unsigned long) high_memory)
1429                return 0;
1430
1431        return kern_addr_valid(vaddr);
1432}
1433
/* Handle a Cheetah correctable ECC error (CEE) trap.
 *
 * @regs: trap-time register state
 * @afsr: Asynchronous Fault Status Register value captured at trap time
 * @afar: Asynchronous Fault Address Register value captured at trap time
 *
 * The low-level trap entry disabled the I-cache and (per the re-enable
 * sequence below) correctable-error reporting (CEEN); both are turned
 * back on here once the error state has been snapshotted and the
 * affected cache state flushed.
 */
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No per-cpu error log exists yet (very early in boot);
		 * all we can do is report through the PROM and halt.
		 */
		prom_printf("ERROR: Early CEE error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	if (is_memory && (afsr & CHAFSR_CE) != 0UL) {
		/* XXX Might want to log the results of this operation
		 * XXX somewhere... -DaveM
		 */
		cheetah_fix_ce(afar);
	}

	{
		int flush_all, flush_line;

		/* If EDC or CPC is the *only* error bit set, flushing
		 * the single E-cache line at AFAR suffices; any other
		 * combination gets a full E-cache flush.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDC)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_CPC) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_CPC)
				flush_line = 1;
			else
				flush_all = 1;
		}

		/* Trap handler only disabled I-cache, flush it. */
		cheetah_flush_icache();

		/* Re-enable I-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			       "i" (ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR */
	(void) cheetah_recheck_errors(&local_snapshot);

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	if (!recoverable)
		panic("Irrecoverable Correctable-ECC error trap.\n");
}
1533
/* Handle a Cheetah deferred asynchronous error trap.
 *
 * @regs: trap-time register state
 * @afsr: Asynchronous Fault Status Register value captured at trap time
 * @afar: Asynchronous Fault Address Register value captured at trap time
 *
 * Unlike the correctable-error path, surviving this trap requires that
 * the fault hit main memory and that the interrupted context can be
 * fixed up unambiguously (user mode, or a kernel access covered by an
 * exception table entry) -- see the recovery logic at the bottom.
 */
void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
{
	struct cheetah_err_info local_snapshot, *p;
	int recoverable, is_memory;

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == smp_processor_id()) {
		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I-cache/D-cache */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				       "i" (DCU_DC | DCU_IC)
				     : "g1");

		/* Re-enable error reporting */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_ESTATE_ERROR_EN),
				       "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
				     : "g1");

		(void) cheetah_recheck_errors(NULL);

		/* Flag the fault for the poke code and skip the
		 * faulting instruction.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	p = cheetah_get_error_log(afsr);
	if (!p) {
		/* No per-cpu error log exists yet (very early in boot);
		 * all we can do is report through the PROM and halt.
		 */
		prom_printf("ERROR: Early deferred error afsr[%016lx] afar[%016lx]\n",
			    afsr, afar);
		prom_printf("ERROR: CPU(%d) TPC[%016lx] TNPC[%016lx] TSTATE[%016lx]\n",
			    smp_processor_id(), regs->tpc, regs->tnpc, regs->tstate);
		prom_halt();
	}

	/* Grab snapshot of logged error. */
	memcpy(&local_snapshot, p, sizeof(local_snapshot));

	/* If the current trap snapshot does not match what the
	 * trap handler passed along into our args, big trouble.
	 * In such a case, mark the local copy as invalid.
	 *
	 * Else, it matches and we mark the afsr in the non-local
	 * copy as invalid so we may log new error traps there.
	 */
	if (p->afsr != afsr || p->afar != afar)
		local_snapshot.afsr = CHAFSR_INVALID;
	else
		p->afsr = CHAFSR_INVALID;

	is_memory = cheetah_check_main_memory(afar);

	{
		int flush_all, flush_line;

		/* If EDU or BERR is the *only* error bit set, flushing
		 * the single E-cache line at AFAR suffices; any other
		 * combination gets a full E-cache flush.
		 */
		flush_all = flush_line = 0;
		if ((afsr & CHAFSR_EDU) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_EDU)
				flush_line = 1;
			else
				flush_all = 1;
		} else if ((afsr & CHAFSR_BERR) != 0UL) {
			if ((afsr & cheetah_afsr_errors) == CHAFSR_BERR)
				flush_line = 1;
			else
				flush_all = 1;
		}

		cheetah_flush_icache();
		cheetah_flush_dcache();

		/* Re-enable I/D caches */
		__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
				     "or %%g1, %1, %%g1\n\t"
				     "stxa %%g1, [%%g0] %0\n\t"
				     "membar #Sync"
				     : /* no outputs */
				     : "i" (ASI_DCU_CONTROL_REG),
				     "i" (DCU_IC | DCU_DC)
				     : "g1");

		if (flush_all)
			cheetah_flush_ecache();
		else if (flush_line)
			cheetah_flush_ecache_line(afar);
	}

	/* Re-enable error reporting */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_ESTATE_ERROR_EN),
			     "i" (ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN)
			     : "g1");

	/* Decide if we can continue after handling this trap and
	 * logging the error.
	 */
	recoverable = 1;
	if (afsr & (CHAFSR_PERR | CHAFSR_IERR | CHAFSR_ISAP))
		recoverable = 0;

	/* Re-check AFSR/AFAR.  What we are looking for here is whether a new
	 * error was logged while we had error reporting traps disabled.
	 */
	if (cheetah_recheck_errors(&local_snapshot)) {
		unsigned long new_afsr = local_snapshot.afsr;

		/* If we got a new asynchronous error, die... */
		if (new_afsr & (CHAFSR_EMU | CHAFSR_EDU |
				CHAFSR_WDU | CHAFSR_CPU |
				CHAFSR_IVU | CHAFSR_UE |
				CHAFSR_BERR | CHAFSR_TO))
			recoverable = 0;
	}

	/* Log errors. */
	cheetah_log_errors(regs, &local_snapshot, afsr, afar, recoverable);

	/* "Recoverable" here means we try to yank the page from ever
	 * being newly used again.  This depends upon a few things:
	 * 1) Must be main memory, and AFAR must be valid.
	 * 2) If we trapped from user, OK.
	 * 3) Else, if we trapped from kernel we must find exception
	 *    table entry (ie. we have to have been accessing user
	 *    space).
	 *
	 * If AFAR is not in main memory, or we trapped from kernel
	 * and cannot find an exception table entry, it is unacceptable
	 * to try and continue.
	 */
	if (recoverable && is_memory) {
		if ((regs->tstate & TSTATE_PRIV) == 0UL) {
			/* OK, usermode access. */
			recoverable = 1;
		} else {
			const struct exception_table_entry *entry;

			entry = search_exception_tables(regs->tpc);
			if (entry) {
				/* OK, kernel access to userspace. */
				recoverable = 1;

			} else {
				/* BAD, privileged state is corrupted. */
				recoverable = 0;
			}

			if (recoverable) {
				/* Pin the bad page so it is never handed
				 * out again; if the pfn is bogus we must
				 * not continue.
				 */
				if (pfn_valid(afar >> PAGE_SHIFT))
					get_page(pfn_to_page(afar >> PAGE_SHIFT));
				else
					recoverable = 0;

				/* Only perform fixup if we still have a
				 * recoverable condition.
				 */
				if (recoverable) {
					regs->tpc = entry->fixup;
					regs->tnpc = regs->tpc + 4;
				}
			}
		}
	} else {
		recoverable = 0;
	}

	if (!recoverable)
		panic("Irrecoverable deferred error trap.\n");
}
1720
/* Handle a D/I cache parity error trap.  TYPE is encoded as:
 *
 * Bit0:        0=dcache,1=icache
 * Bit1:        0=recoverable,1=unrecoverable
 *
 * The hardware has disabled both the I-cache and D-cache in
 * the %dcr register.  We flush the affected cache (zapping the
 * D-cache parity state for D-cache errors), re-enable both
 * caches, and either log a warning (recoverable) or panic.
 */
void cheetah_plus_parity_error(int type, struct pt_regs *regs)
{
	if (type & 0x1)
		__cheetah_flush_icache();
	else
		cheetah_plus_zap_dcache_parity();
	cheetah_flush_dcache();

	/* Re-enable I-cache/D-cache */
	__asm__ __volatile__("ldxa [%%g0] %0, %%g1\n\t"
			     "or %%g1, %1, %%g1\n\t"
			     "stxa %%g1, [%%g0] %0\n\t"
			     "membar #Sync"
			     : /* no outputs */
			     : "i" (ASI_DCU_CONTROL_REG),
			       "i" (DCU_DC | DCU_IC)
			     : "g1");

	/* Unrecoverable: report and die. */
	if (type & 0x2) {
		printk(KERN_EMERG "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
		       smp_processor_id(),
		       (type & 0x1) ? 'I' : 'D',
		       regs->tpc);
		printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
		panic("Irrecoverable Cheetah+ parity error.");
	}

	printk(KERN_WARNING "CPU[%d]: Cheetah+ %c-cache parity error at TPC[%016lx]\n",
	       smp_processor_id(),
	       (type & 0x1) ? 'I' : 'D',
	       regs->tpc);
	printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
}
1762
/* Layout of one sun4v error report entry as delivered by the
 * hypervisor into the per-cpu resumable/nonresumable error queues.
 * The /*0xNN*/ annotations give the byte offset of each field;
 * the whole entry is 0x40 bytes.
 */
struct sun4v_error_entry {
	/* Unique error handle */
/*0x00*/u64		err_handle;

	/* %stick value at the time of the error */
/*0x08*/u64		err_stick;

/*0x10*/u8		reserved_1[3];

	/* Error type */
/*0x13*/u8		err_type;
#define SUN4V_ERR_TYPE_UNDEFINED	0
#define SUN4V_ERR_TYPE_UNCORRECTED_RES	1
#define SUN4V_ERR_TYPE_PRECISE_NONRES	2
#define SUN4V_ERR_TYPE_DEFERRED_NONRES	3
#define SUN4V_ERR_TYPE_SHUTDOWN_RQST	4
#define SUN4V_ERR_TYPE_DUMP_CORE	5
#define SUN4V_ERR_TYPE_SP_STATE_CHANGE	6
#define SUN4V_ERR_TYPE_NUM		7

	/* Error attributes */
/*0x14*/u32		err_attrs;
#define SUN4V_ERR_ATTRS_PROCESSOR	0x00000001
#define SUN4V_ERR_ATTRS_MEMORY		0x00000002
#define SUN4V_ERR_ATTRS_PIO		0x00000004
#define SUN4V_ERR_ATTRS_INT_REGISTERS	0x00000008
#define SUN4V_ERR_ATTRS_FPU_REGISTERS	0x00000010
#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST	0x00000020
#define SUN4V_ERR_ATTRS_ASR		0x00000040
#define SUN4V_ERR_ATTRS_ASI		0x00000080
#define SUN4V_ERR_ATTRS_PRIV_REG	0x00000100
#define SUN4V_ERR_ATTRS_SPSTATE_MSK	0x00000600
#define SUN4V_ERR_ATTRS_SPSTATE_SHFT	9
#define SUN4V_ERR_ATTRS_MODE_MSK	0x03000000
#define SUN4V_ERR_ATTRS_MODE_SHFT	24
#define SUN4V_ERR_ATTRS_RES_QUEUE_FULL	0x80000000

#define SUN4V_ERR_SPSTATE_FAULTED	0
#define SUN4V_ERR_SPSTATE_AVAILABLE	1
#define SUN4V_ERR_SPSTATE_NOT_PRESENT	2

#define SUN4V_ERR_MODE_USER		1
#define SUN4V_ERR_MODE_PRIV		2

	/* Real address of the memory region or PIO transaction */
/*0x18*/u64		err_raddr;

	/* Size of the operation triggering the error, in bytes */
/*0x20*/u32		err_size;

	/* ID of the CPU */
/*0x24*/u16		err_cpu;

	/* Grace period for shutdown, in seconds */
/*0x26*/u16		err_secs;

	/* Value of the %asi register */
/*0x28*/u8		err_asi;

/*0x29*/u8		reserved_2;

	/* Value of the ASR register number */
/*0x2a*/u16		err_asr;
#define SUN4V_ERR_ASR_VALID		0x8000

/*0x2c*/u32		reserved_3;
/*0x30*/u64		reserved_4;
/*0x38*/u64		reserved_5;
};
1832
/* Counts of resumable/nonresumable error-queue overflow events,
 * bumped by the overflow handlers below and reported (then reset)
 * from sun4v_log_error().
 */
static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
1835
1836static const char *sun4v_err_type_to_str(u8 type)
1837{
1838        static const char *types[SUN4V_ERR_TYPE_NUM] = {
1839                "undefined",
1840                "uncorrected resumable",
1841                "precise nonresumable",
1842                "deferred nonresumable",
1843                "shutdown request",
1844                "dump core",
1845                "SP state change",
1846        };
1847
1848        if (type < SUN4V_ERR_TYPE_NUM)
1849                return types[type];
1850
1851        return "unknown";
1852}
1853
/* Emit (via pr_cont, as a continuation of the line started by the
 * caller) a human-readable decoding of a sun4v error report's
 * err_attrs word: one name per set low-order attribute bit, then the
 * SP state, the mode, and the queue-full flag.  attr_names[i]
 * corresponds to attribute bit (1U << i), matching the
 * SUN4V_ERR_ATTRS_* definitions above.
 */
static void sun4v_emit_err_attr_strings(u32 attrs)
{
	static const char *attr_names[] = {
		"processor",
		"memory",
		"PIO",
		"int-registers",
		"fpu-registers",
		"shutdown-request",
		"ASR",
		"ASI",
		"priv-reg",
	};
	static const char *sp_states[] = {
		"sp-faulted",
		"sp-available",
		"sp-not-present",
		"sp-state-reserved",
	};
	static const char *modes[] = {
		"mode-reserved0",
		"user",
		"priv",
		"mode-reserved1",
	};
	u32 sp_state, mode;
	int i;

	for (i = 0; i < ARRAY_SIZE(attr_names); i++) {
		if (attrs & (1U << i)) {
			const char *s = attr_names[i];

			pr_cont("%s ", s);
		}
	}

	/* 2-bit fields, so the table lookups can never go out of range. */
	sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >>
		    SUN4V_ERR_ATTRS_SPSTATE_SHFT);
	pr_cont("%s ", sp_states[sp_state]);

	mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >>
		SUN4V_ERR_ATTRS_MODE_SHFT);
	pr_cont("%s ", modes[mode]);

	if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL)
		pr_cont("res-queue-full ");
}
1901
/* When the report contains a real-address of "-1" it means that the
 * hardware did not provide the address.  So we compute the effective
 * address of the load or store instruction at regs->tpc and report
 * that.  Usually when this happens it's a PIO and in such a case we
 * are using physical addresses with bypass ASIs anyways, so what we
 * report here is exactly what we want.
 */
static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
{
	unsigned int insn;
	u64 addr;

	/* Only safe to dereference %tpc directly when we trapped from
	 * the kernel; bail for user-mode traps.
	 */
	if (!(regs->tstate & TSTATE_PRIV))
		return;

	insn = *(unsigned int *) regs->tpc;

	addr = compute_effective_address(regs, insn, 0);

	printk("%s: insn effective address [0x%016llx]\n",
	       pfx, addr);
}
1924
/* Pretty-print one sun4v error report entry.
 *
 * @regs: trap-time register state
 * @ent:  local copy of the error report entry
 * @cpu:  cpu the report was taken on
 * @pfx:  log prefix (includes the KERN_* level), e.g. "RESUMABLE ERROR"
 * @ocnt: matching queue-overflow counter, reported and reset here
 */
static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
			    int cpu, const char *pfx, atomic_t *ocnt)
{
	/* Raw dump views the entry as eight 64-bit words (0x40 bytes,
	 * matching the struct sun4v_error_entry layout).
	 */
	u64 *raw_ptr = (u64 *) ent;
	u32 attrs;
	int cnt;

	printk("%s: Reporting on cpu %d\n", pfx, cpu);
	printk("%s: TPC [0x%016lx] <%pS>\n",
	       pfx, regs->tpc, (void *) regs->tpc);

	printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n",
	       pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]);
	printk("%s:      %016llx:%016llx:%016llx:%016llx]\n",
	       pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]);

	printk("%s: handle [0x%016llx] stick [0x%016llx]\n",
	       pfx, ent->err_handle, ent->err_stick);

	printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type));

	attrs = ent->err_attrs;
	printk("%s: attrs [0x%08x] < ", pfx, attrs);
	sun4v_emit_err_attr_strings(attrs);
	pr_cont(">\n");

	/* Various fields in the error report are only valid if
	 * certain attribute bits are set.
	 */
	if (attrs & (SUN4V_ERR_ATTRS_MEMORY |
		     SUN4V_ERR_ATTRS_PIO |
		     SUN4V_ERR_ATTRS_ASI)) {
		printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr);

		/* All-ones raddr means the hardware didn't supply one;
		 * reconstruct it from the faulting instruction.
		 */
		if (ent->err_raddr == ~(u64)0)
			sun4v_report_real_raddr(pfx, regs);
	}

	if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI))
		printk("%s: size [0x%x]\n", pfx, ent->err_size);

	if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR |
		     SUN4V_ERR_ATTRS_INT_REGISTERS |
		     SUN4V_ERR_ATTRS_FPU_REGISTERS |
		     SUN4V_ERR_ATTRS_PRIV_REG))
		printk("%s: cpu[%u]\n", pfx, ent->err_cpu);

	if (attrs & SUN4V_ERR_ATTRS_ASI)
		printk("%s: asi [0x%02x]\n", pfx, ent->err_asi);

	if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS |
		      SUN4V_ERR_ATTRS_FPU_REGISTERS |
		      SUN4V_ERR_ATTRS_PRIV_REG)) &&
	    (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0)
		printk("%s: reg [0x%04x]\n",
		       pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID);

	show_regs(regs);

	/* Report and reset any queue overflows accumulated while we
	 * could not safely printk (see the overflow handlers below).
	 */
	if ((cnt = atomic_read(ocnt)) != 0) {
		atomic_set(ocnt, 0);
		wmb();
		printk("%s: Queue overflowed %d times.\n",
		       pfx, cnt);
	}
}
1991
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event and clear the first word of the entry.
 *
 * @offset is the byte offset of the report inside this cpu's
 * resumable-error queue buffer (tb->resum_kernel_buf_pa).
 */
void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->resum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

	if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) {
		/* We should really take the seconds field of
		 * the error report and use it for the shutdown
		 * invocation, but for now do the same thing we
		 * do for a DS shutdown request.
		 */
		pr_info("Shutdown request, %u seconds...\n",
			local_copy.err_secs);
		orderly_poweroff(true);
		return;
	}

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_ERR "RESUMABLE ERROR",
			&sun4v_resum_oflow_cnt);
}
2032
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above
 * (in sun4v_log_error()).
 */
void sun4v_resum_overflow(struct pt_regs *regs)
{
	atomic_inc(&sun4v_resum_oflow_cnt);
}
2041
/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 *
 * @offset is the byte offset of the report inside this cpu's
 * nonresumable-error queue buffer (tb->nonresum_kernel_buf_pa).
 */
void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
{
	struct sun4v_error_entry *ent, local_copy;
	struct trap_per_cpu *tb;
	unsigned long paddr;
	int cpu;

	cpu = get_cpu();

	tb = &trap_block[cpu];
	paddr = tb->nonresum_kernel_buf_pa + offset;
	ent = __va(paddr);

	memcpy(&local_copy, ent, sizeof(struct sun4v_error_entry));

	/* We have a local copy now, so release the entry.  */
	ent->err_handle = 0;
	wmb();

	put_cpu();

#ifdef CONFIG_PCI
	/* Check for the special PCI poke sequence. */
	if (pci_poke_in_progress && pci_poke_cpu == cpu) {
		/* Expected fault from a config-space poke: flag it and
		 * skip the faulting instruction instead of dying.
		 */
		pci_poke_faulted = 1;
		regs->tpc += 4;
		regs->tnpc = regs->tpc + 4;
		return;
	}
#endif

	sun4v_log_error(regs, &local_copy, cpu,
			KERN_EMERG "NON-RESUMABLE ERROR",
			&sun4v_nonresum_oflow_cnt);

	panic("Non-resumable error.");
}
2082
/* If we try to printk() we'll probably make matters worse, by trying
 * to retake locks this cpu already holds or causing more errors. So
 * just bump a counter, and we'll report these counter bumps above
 * (in sun4v_log_error()).
 */
void sun4v_nonresum_overflow(struct pt_regs *regs)
{
	/* XXX Actually even this can make not that much sense.  Perhaps
	 * XXX we should just pull the plug and panic directly from here?
	 */
	atomic_inc(&sun4v_nonresum_oflow_cnt);
}
2094
/* ITLB error state reported by sun4v_itlb_error_report() below.
 * NOTE(review): presumably filled in by the sun4v trap entry
 * assembly before calling the report function -- only read here.
 */
unsigned long sun4v_err_itlb_vaddr;
unsigned long sun4v_err_itlb_ctx;
unsigned long sun4v_err_itlb_pte;
unsigned long sun4v_err_itlb_error;
2099
/* Report a fatal sun4v ITLB error at trap level @tl and halt via the
 * PROM.  For tl > 1 the trap-level stack saved after pt_regs is
 * dumped first.  Never returns.
 */
void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_itlb_vaddr, sun4v_err_itlb_ctx,
	       sun4v_err_itlb_pte, sun4v_err_itlb_error);

	prom_halt();
}
2118
/* DTLB error state reported by sun4v_dtlb_error_report() below.
 * NOTE(review): presumably filled in by the sun4v trap entry
 * assembly before calling the report function -- only read here.
 */
unsigned long sun4v_err_dtlb_vaddr;
unsigned long sun4v_err_dtlb_ctx;
unsigned long sun4v_err_dtlb_pte;
unsigned long sun4v_err_dtlb_error;
2123
/* Report a fatal sun4v DTLB error at trap level @tl and halt via the
 * PROM.  For tl > 1 the trap-level stack saved after pt_regs is
 * dumped first.  Never returns.
 */
void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
{
	if (tl > 1)
		dump_tl1_traplog((struct tl1_traplog *)(regs + 1));

	printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
	       regs->tpc, tl);
	printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
	printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
	       (void *) regs->u_regs[UREG_I7]);
	printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
	       "pte[%lx] error[%lx]\n",
	       sun4v_err_dtlb_vaddr, sun4v_err_dtlb_ctx,
	       sun4v_err_dtlb_pte, sun4v_err_dtlb_error);

	prom_halt();
}
2142
/* Log a failed sun4v TLB hypervisor call.  @err is the hypervisor
 * error status, @op identifies the attempted TLB operation.
 */
void hypervisor_tlbop_error(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: TLB hv call error %lu for op %lu\n",
	       err, op);
}
2148
/* As hypervisor_tlbop_error(), but for a TLB hypervisor call made
 * from a cross-call (xcall) context.
 */
void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op)
{
	printk(KERN_CRIT "SUN4V: XCALL TLB hv call error %lu for op %lu\n",
	       err, op);
}
2154
2155void do_fpe_common(struct pt_regs *regs)
2156{
2157        if (regs->tstate & TSTATE_PRIV) {
2158                regs->tpc = regs->tnpc;
2159                regs->tnpc += 4;
2160        } else {
2161                unsigned long fsr = current_thread_info()->xfsr[0];
2162                siginfo_t info;
2163
2164                if (test_thread_flag(TIF_32BIT)) {
2165                        regs->tpc &= 0xffffffff;
2166                        regs->tnpc &= 0xffffffff;
2167                }
2168                info.si_signo = SIGFPE;
2169                info.si_errno = 0;
2170                info.si_addr = (void __user *)regs->tpc;
2171                info.si_trapno = 0;
2172                info.si_code = __SI_FAULT;
2173                if ((fsr & 0x1c000) == (1 << 14)) {
2174                        if (fsr & 0x10)
2175                                info.si_code = FPE_FLTINV;
2176                        else if (fsr & 0x08)
2177                                info.si_code = FPE_FLTOVF;
2178                        else if (fsr & 0x04)
2179                                info.si_code = FPE_FLTUND;
2180                        else if (fsr & 0x02)
2181                                info.si_code = FPE_FLTDIV;
2182                        else if (fsr & 0x01)
2183                                info.si_code = FPE_FLTRES;
2184                }
2185                force_sig_info(SIGFPE, &info, current);
2186        }
2187}
2188
2189void do_fpieee(struct pt_regs *regs)
2190{
2191        if (notify_die(DIE_TRAP, "fpu exception ieee", regs,
2192                       0, 0x24, SIGFPE) == NOTIFY_STOP)
2193                return;
2194
2195        do_fpe_common(regs);
2196}
2197
2198extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
2199
2200void do_fpother(struct pt_regs *regs)
2201{
2202        struct fpustate *f = FPUSTATE;
2203        int ret = 0;
2204
2205        if (notify_die(DIE_TRAP, "fpu exception other", regs,
2206                       0, 0x25, SIGFPE) == NOTIFY_STOP)
2207                return;
2208
2209        switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2210        case (2 << 14): /* unfinished_FPop */
2211        case (3 << 14): /* unimplemented_FPop */
2212                ret = do_mathemu(regs, f, false);
2213                break;
2214        }
2215        if (ret)
2216                return;
2217        do_fpe_common(regs);
2218}
2219
2220void do_tof(struct pt_regs *regs)
2221{
2222        siginfo_t info;
2223
2224        if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs,
2225                       0, 0x26, SIGEMT) == NOTIFY_STOP)
2226                return;
2227
2228        if (regs->tstate & TSTATE_PRIV)
2229                die_if_kernel("Penguin overflow trap from kernel mode", regs);
2230        if (test_thread_flag(TIF_32BIT)) {
2231                regs->tpc &= 0xffffffff;
2232                regs->tnpc &= 0xffffffff;
2233        }
2234        info.si_signo = SIGEMT;
2235        info.si_errno = 0;
2236        info.si_code = EMT_TAGOVF;
2237        info.si_addr = (void __user *)regs->tpc;
2238        info.si_trapno = 0;
2239        force_sig_info(SIGEMT, &info, current);
2240}
2241
2242void do_div0(struct pt_regs *regs)
2243{
2244        siginfo_t info;
2245
2246        if (notify_die(DIE_TRAP, "integer division by zero", regs,
2247                       0, 0x28, SIGFPE) == NOTIFY_STOP)
2248                return;
2249
2250        if (regs->tstate & TSTATE_PRIV)
2251                die_if_kernel("TL0: Kernel divide by zero.", regs);
2252        if (test_thread_flag(TIF_32BIT)) {
2253                regs->tpc &= 0xffffffff;
2254                regs->tnpc &= 0xffffffff;
2255        }
2256        info.si_signo = SIGFPE;
2257        info.si_errno = 0;
2258        info.si_code = FPE_INTDIV;
2259        info.si_addr = (void __user *)regs->tpc;
2260        info.si_trapno = 0;
2261        force_sig_info(SIGFPE, &info, current);
2262}
2263
/* Dump the nine kernel instructions surrounding @pc (three before,
 * five after), bracketing the faulting one with '<' and '>'.
 * Nothing is printed when @pc is not 4-byte aligned.
 */
static void instruction_dump(unsigned int *pc)
{
	unsigned int *ip;

	if (((unsigned long) pc) & 3)
		return;

	printk("Instruction DUMP:");
	for (ip = pc - 3; ip < pc + 6; ip++) {
		char lmark = (ip == pc) ? '<' : ' ';
		char rmark = (ip == pc) ? '>' : ' ';

		printk("%c%08x%c", lmark, *ip, rmark);
	}
	printk("\n");
}
2276
2277static void user_instruction_dump(unsigned int __user *pc)
2278{
2279        int i;
2280        unsigned int buf[9];
2281        
2282        if ((((unsigned long) pc) & 3))
2283                return;
2284                
2285        if (copy_from_user(buf, pc - 3, sizeof(buf)))
2286                return;
2287
2288        printk("Instruction DUMP:");
2289        for (i = 0; i < 9; i++)
2290                printk("%c%08x%c",i==3?' ':'<',buf[i],i==3?' ':'>');
2291        printk("\n");
2292}
2293
/* Dump a kernel call trace for @tsk starting from stack pointer @_ksp.
 *
 * If @tsk is NULL, current is used.  If @_ksp is NULL the walk starts
 * from the task's saved stack pointer (or the live %fp when tsk ==
 * current).  At most 16 frames are printed, following saved frame
 * pointers and decoding trap frames encountered along the way.
 */
void show_stack(struct task_struct *tsk, unsigned long *_ksp)
{
	unsigned long fp, ksp;
	struct thread_info *tp;
	int count = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	ksp = (unsigned long) _ksp;
	if (!tsk)
		tsk = current;
	tp = task_thread_info(tsk);
	if (ksp == 0UL) {
		if (tsk == current)
			asm("mov %%fp, %0" : "=r" (ksp));
		else
			ksp = tp->ksp;
	}
	/* Spill the register windows to the stack so the walk sees
	 * up-to-date saved frame pointers.
	 */
	if (tp == current_thread_info())
		flushw_all();

	fp = ksp + STACK_BIAS;

	printk("Call Trace:\n");
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(tp, fp))
			break;
		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(tp, regs)) {
			/* Stop at the user/kernel boundary. */
			if (!(regs->tstate & TSTATE_PRIV))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}

		printk(" [%016lx] %pS\n", pc, (void *) pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		/* If this PC is the function-graph-tracer trampoline,
		 * also print the real return address it replaced.
		 */
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = tsk->curr_ret_stack;
			if (tsk->ret_stack && index >= graph) {
				pc = tsk->ret_stack[index - graph].ret;
				printk(" [%016lx] %pS\n", pc, (void *) pc);
				graph++;
			}
		}
#endif
	} while (++count < 16);
}
2352
2353static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
2354{
2355        unsigned long fp = rw->ins[6];
2356
2357        if (!fp)
2358                return NULL;
2359
2360        return (struct reg_window *) (fp + STACK_BIAS);
2361}
2362
/* Oops/die path.  Despite the name this terminates the current task
 * unconditionally: kernel-mode faults print a register/backtrace dump
 * and exit with SIGKILL, user-mode faults dump the faulting user
 * instructions and exit with SIGSEGV.
 */
void die_if_kernel(char *str, struct pt_regs *regs)
{
        static int die_counter;     /* numbers successive oopses in the log */
        int count = 0;
        
        /* Amuse the user. */
        printk(
"              \\|/ ____ \\|/\n"
"              \"@'/ .. \\`@\"\n"
"              /_| \\__/ |_\\\n"
"                 \\__U_/\n");

        printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
        notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
        /* Spill register windows so the saved frames below are current. */
        __asm__ __volatile__("flushw");
        show_regs(regs);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        if (regs->tstate & TSTATE_PRIV) {
                struct thread_info *tp = current_thread_info();
                struct reg_window *rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);

                /* Stop the back trace when we hit userland or we
                 * find some badly aligned kernel stack.
                 */
                while (rw &&
                       count++ < 30 &&
                       kstack_valid(tp, (unsigned long) rw)) {
                        printk("Caller[%016lx]: %pS\n", rw->ins[7],
                               (void *) rw->ins[7]);

                        rw = kernel_stack_up(rw);
                }
                instruction_dump ((unsigned int *) regs->tpc);
        } else {
                if (test_thread_flag(TIF_32BIT)) {
                        /* 32-bit tasks only see the low 32 bits of TPC/TNPC. */
                        regs->tpc &= 0xffffffff;
                        regs->tnpc &= 0xffffffff;
                }
                user_instruction_dump ((unsigned int __user *) regs->tpc);
        }
        if (regs->tstate & TSTATE_PRIV)
                do_exit(SIGKILL);
        do_exit(SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
2409
2410#define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19))
2411#define VIS_OPCODE_VAL  ((0x2 << 30) | (0x36 << 19))
2412
2413extern int handle_popc(u32 insn, struct pt_regs *regs);
2414extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
2415
2416void do_illegal_instruction(struct pt_regs *regs)
2417{
2418        unsigned long pc = regs->tpc;
2419        unsigned long tstate = regs->tstate;
2420        u32 insn;
2421        siginfo_t info;
2422
2423        if (notify_die(DIE_TRAP, "illegal instruction", regs,
2424                       0, 0x10, SIGILL) == NOTIFY_STOP)
2425                return;
2426
2427        if (tstate & TSTATE_PRIV)
2428                die_if_kernel("Kernel illegal instruction", regs);
2429        if (test_thread_flag(TIF_32BIT))
2430                pc = (u32)pc;
2431        if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
2432                if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ {
2433                        if (handle_popc(insn, regs))
2434                                return;
2435                } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ {
2436                        if (handle_ldf_stq(insn, regs))
2437                                return;
2438                } else if (tlb_type == hypervisor) {
2439                        if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) {
2440                                if (!vis_emul(regs, insn))
2441                                        return;
2442                        } else {
2443                                struct fpustate *f = FPUSTATE;
2444
2445                                /* On UltraSPARC T2 and later, FPU insns which
2446                                 * are not implemented in HW signal an illegal
2447                                 * instruction trap and do not set the FP Trap
2448                                 * Trap in the %fsr to unimplemented_FPop.
2449                                 */
2450                                if (do_mathemu(regs, f, true))
2451                                        return;
2452                        }
2453                }
2454        }
2455        info.si_signo = SIGILL;
2456        info.si_errno = 0;
2457        info.si_code = ILL_ILLOPC;
2458        info.si_addr = (void __user *)pc;
2459        info.si_trapno = 0;
2460        force_sig_info(SIGILL, &info, current);
2461}
2462
2463extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
2464
2465void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
2466{
2467        siginfo_t info;
2468
2469        if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2470                       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2471                return;
2472
2473        if (regs->tstate & TSTATE_PRIV) {
2474                kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2475                return;
2476        }
2477        info.si_signo = SIGBUS;
2478        info.si_errno = 0;
2479        info.si_code = BUS_ADRALN;
2480        info.si_addr = (void __user *)sfar;
2481        info.si_trapno = 0;
2482        force_sig_info(SIGBUS, &info, current);
2483}
2484
2485void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx)
2486{
2487        siginfo_t info;
2488
2489        if (notify_die(DIE_TRAP, "memory address unaligned", regs,
2490                       0, 0x34, SIGSEGV) == NOTIFY_STOP)
2491                return;
2492
2493        if (regs->tstate & TSTATE_PRIV) {
2494                kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc));
2495                return;
2496        }
2497        info.si_signo = SIGBUS;
2498        info.si_errno = 0;
2499        info.si_code = BUS_ADRALN;
2500        info.si_addr = (void __user *) addr;
2501        info.si_trapno = 0;
2502        force_sig_info(SIGBUS, &info, current);
2503}
2504
2505void do_privop(struct pt_regs *regs)
2506{
2507        siginfo_t info;
2508
2509        if (notify_die(DIE_TRAP, "privileged operation", regs,
2510                       0, 0x11, SIGILL) == NOTIFY_STOP)
2511                return;
2512
2513        if (test_thread_flag(TIF_32BIT)) {
2514                regs->tpc &= 0xffffffff;
2515                regs->tnpc &= 0xffffffff;
2516        }
2517        info.si_signo = SIGILL;
2518        info.si_errno = 0;
2519        info.si_code = ILL_PRVOPC;
2520        info.si_addr = (void __user *)regs->tpc;
2521        info.si_trapno = 0;
2522        force_sig_info(SIGILL, &info, current);
2523}
2524
/* Privileged-action trap: treated exactly like a privileged opcode,
 * so just reuse do_privop()'s SIGILL delivery.
 */
void do_privact(struct pt_regs *regs)
{
        do_privop(regs);
}
2529
/* Trap level 1 stuff or other traps we should never see... */
/* The *_tl1 handlers below fire when a trap is taken while already at
 * trap-level > 0, i.e. the trap-handling code itself faulted.  All are
 * unconditionally fatal: each dumps the per-level trap state saved just
 * above pt_regs (see struct tl1_traplog) and then dies.
 */
void do_cee(struct pt_regs *regs)
{
        die_if_kernel("TL0: Cache Error Exception", regs);
}

void do_cee_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Cache Error Exception", regs);
}

void do_dae_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Data Access Exception", regs);
}

void do_iae_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Instruction Access Exception", regs);
}

void do_div0_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: DIV0 Exception", regs);
}

void do_fpdis_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: FPU Disabled", regs);
}

void do_fpieee_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: FPU IEEE Exception", regs);
}

void do_fpother_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: FPU Other Exception", regs);
}

void do_ill_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Illegal Instruction Exception", regs);
}

void do_irq_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: IRQ Exception", regs);
}

void do_lddfmna_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: LDDF Exception", regs);
}

void do_stdfmna_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: STDF Exception", regs);
}
2601
/* Physical/virtual watchpoint and tag-overflow traps.  None of these
 * should ever reach the kernel in normal operation, so they are all
 * fatal; the TL1 variants additionally dump the saved trap stack.
 */
void do_paw(struct pt_regs *regs)
{
        die_if_kernel("TL0: Phys Watchpoint Exception", regs);
}

void do_paw_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Phys Watchpoint Exception", regs);
}

void do_vaw(struct pt_regs *regs)
{
        die_if_kernel("TL0: Virt Watchpoint Exception", regs);
}

void do_vaw_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Virt Watchpoint Exception", regs);
}

void do_tof_tl1(struct pt_regs *regs)
{
        dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
        die_if_kernel("TL1: Tag Overflow Exception", regs);
}
2629
/* SunOS/SPARC-v8 compat: emulate reading the %psr register.  Returns a
 * synthesized PSR image (built from the v9 TSTATE) in %o0 and steps the
 * PC past the trapping instruction.
 */
void do_getpsr(struct pt_regs *regs)
{
        regs->u_regs[UREG_I0] = tstate_to_psr(regs->tstate);
        /* Advance past the trap instruction: tpc <- tnpc, tnpc += 4. */
        regs->tpc   = regs->tnpc;
        regs->tnpc += 4;
        if (test_thread_flag(TIF_32BIT)) {
                /* 32-bit tasks only ever see the low 32 bits of TPC/TNPC. */
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
}
2640
/* Per-cpu trap dispatch state, indexed by cpu id.  The trap entry
 * assembly reaches into these via the TRAP_PER_CPU_xxx offsets that
 * trap_init() below sanity-checks.
 */
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);

/* This can get invoked before sched_init() so play it super safe
 * and use hard_smp_processor_id().
 */
void notrace init_cur_cpu_trap(struct thread_info *t)
{
        int cpu = hard_smp_processor_id();
        struct trap_per_cpu *p = &trap_block[cpu];

        /* Record this cpu's thread_info; pgd_paddr is filled in later
         * when an mm is actually switched onto this cpu.
         */
        p->thread = t;
        p->pgd_paddr = 0;
}
2655
2656extern void thread_info_offsets_are_bolixed_dave(void);
2657extern void trap_per_cpu_offsets_are_bolixed_dave(void);
2658extern void tsb_config_offsets_are_bolixed_dave(void);
2659
/* Only invoked on boot processor. */
void __init trap_init(void)
{
        /* Compile time sanity check. */
        /* The trap entry assembly addresses thread_info, trap_per_cpu
         * and tsb_config fields through hand-maintained TI_xxx,
         * TRAP_PER_CPU_xxx and TSB_CONFIG_xxx offset constants; fail
         * the build if any of them drift from the real struct layouts.
         */
        BUILD_BUG_ON(TI_TASK != offsetof(struct thread_info, task) ||
                     TI_FLAGS != offsetof(struct thread_info, flags) ||
                     TI_CPU != offsetof(struct thread_info, cpu) ||
                     TI_FPSAVED != offsetof(struct thread_info, fpsaved) ||
                     TI_KSP != offsetof(struct thread_info, ksp) ||
                     TI_FAULT_ADDR != offsetof(struct thread_info,
                                               fault_address) ||
                     TI_KREGS != offsetof(struct thread_info, kregs) ||
                     TI_UTRAPS != offsetof(struct thread_info, utraps) ||
                     TI_EXEC_DOMAIN != offsetof(struct thread_info,
                                                exec_domain) ||
                     TI_REG_WINDOW != offsetof(struct thread_info,
                                               reg_window) ||
                     TI_RWIN_SPTRS != offsetof(struct thread_info,
                                               rwbuf_stkptrs) ||
                     TI_GSR != offsetof(struct thread_info, gsr) ||
                     TI_XFSR != offsetof(struct thread_info, xfsr) ||
                     TI_PRE_COUNT != offsetof(struct thread_info,
                                              preempt_count) ||
                     TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
                     TI_CURRENT_DS != offsetof(struct thread_info,
                                                current_ds) ||
                     TI_RESTART_BLOCK != offsetof(struct thread_info,
                                                  restart_block) ||
                     TI_KUNA_REGS != offsetof(struct thread_info,
                                              kern_una_regs) ||
                     TI_KUNA_INSN != offsetof(struct thread_info,
                                              kern_una_insn) ||
                     TI_FPREGS != offsetof(struct thread_info, fpregs) ||
                     (TI_FPREGS & (64 - 1)));
                     /* fpregs must also be 64-byte aligned for block
                      * load/store instructions.
                      */

        BUILD_BUG_ON(TRAP_PER_CPU_THREAD != offsetof(struct trap_per_cpu,
                                                     thread) ||
                     (TRAP_PER_CPU_PGD_PADDR !=
                      offsetof(struct trap_per_cpu, pgd_paddr)) ||
                     (TRAP_PER_CPU_CPU_MONDO_PA !=
                      offsetof(struct trap_per_cpu, cpu_mondo_pa)) ||
                     (TRAP_PER_CPU_DEV_MONDO_PA !=
                      offsetof(struct trap_per_cpu, dev_mondo_pa)) ||
                     (TRAP_PER_CPU_RESUM_MONDO_PA !=
                      offsetof(struct trap_per_cpu, resum_mondo_pa)) ||
                     (TRAP_PER_CPU_RESUM_KBUF_PA !=
                      offsetof(struct trap_per_cpu, resum_kernel_buf_pa)) ||
                     (TRAP_PER_CPU_NONRESUM_MONDO_PA !=
                      offsetof(struct trap_per_cpu, nonresum_mondo_pa)) ||
                     (TRAP_PER_CPU_NONRESUM_KBUF_PA !=
                      offsetof(struct trap_per_cpu, nonresum_kernel_buf_pa)) ||
                     (TRAP_PER_CPU_FAULT_INFO !=
                      offsetof(struct trap_per_cpu, fault_info)) ||
                     (TRAP_PER_CPU_CPU_MONDO_BLOCK_PA !=
                      offsetof(struct trap_per_cpu, cpu_mondo_block_pa)) ||
                     (TRAP_PER_CPU_CPU_LIST_PA !=
                      offsetof(struct trap_per_cpu, cpu_list_pa)) ||
                     (TRAP_PER_CPU_TSB_HUGE !=
                      offsetof(struct trap_per_cpu, tsb_huge)) ||
                     (TRAP_PER_CPU_TSB_HUGE_TEMP !=
                      offsetof(struct trap_per_cpu, tsb_huge_temp)) ||
                     (TRAP_PER_CPU_IRQ_WORKLIST_PA !=
                      offsetof(struct trap_per_cpu, irq_worklist_pa)) ||
                     (TRAP_PER_CPU_CPU_MONDO_QMASK !=
                      offsetof(struct trap_per_cpu, cpu_mondo_qmask)) ||
                     (TRAP_PER_CPU_DEV_MONDO_QMASK !=
                      offsetof(struct trap_per_cpu, dev_mondo_qmask)) ||
                     (TRAP_PER_CPU_RESUM_QMASK !=
                      offsetof(struct trap_per_cpu, resum_qmask)) ||
                     (TRAP_PER_CPU_NONRESUM_QMASK !=
                      offsetof(struct trap_per_cpu, nonresum_qmask)) ||
                     (TRAP_PER_CPU_PER_CPU_BASE !=
                      offsetof(struct trap_per_cpu, __per_cpu_base)));

        BUILD_BUG_ON((TSB_CONFIG_TSB !=
                      offsetof(struct tsb_config, tsb)) ||
                     (TSB_CONFIG_RSS_LIMIT !=
                      offsetof(struct tsb_config, tsb_rss_limit)) ||
                     (TSB_CONFIG_NENTRIES !=
                      offsetof(struct tsb_config, tsb_nentries)) ||
                     (TSB_CONFIG_REG_VAL !=
                      offsetof(struct tsb_config, tsb_reg_val)) ||
                     (TSB_CONFIG_MAP_VADDR !=
                      offsetof(struct tsb_config, tsb_map_vaddr)) ||
                     (TSB_CONFIG_MAP_PTE !=
                      offsetof(struct tsb_config, tsb_map_pte)));

        /* Attach to the address space of init_task.  On SMP we
         * do this in smp.c:smp_callin for other cpus.
         */
        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
}
2753