linux/arch/xtensa/kernel/traps.c
/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor   <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>

#include <asm/ptrace.h>
#include <asm/timex.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>

#ifdef CONFIG_KGDB
extern int gdb_enter;
extern int return_from_debug_flag;
#endif

/*
 * Machine specific exception and interrupt handlers
 */

extern void kernel_exception(void);
extern void user_exception(void);

extern void fast_syscall_kernel(void);
extern void fast_syscall_user(void);
extern void fast_alloca(void);
extern void fast_unaligned(void);
extern void fast_second_level_miss(void);
extern void fast_store_prohibited(void);
extern void fast_coprocessor(void);

extern void do_illegal_instruction (struct pt_regs*);
extern void do_interrupt (struct pt_regs*);
extern void do_unaligned_user (struct pt_regs*);
extern void do_multihit (struct pt_regs*, unsigned long);
extern void do_page_fault (struct pt_regs*, unsigned long);
extern void do_debug (struct pt_regs*);
extern void system_call (struct pt_regs*);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

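/*
 * Flags for the 'fast' field of the dispatch entries below: a fast
 * (assembly) handler may be installed for kernel mode, user mode, or
 * both.  An entry with a 'fast' value of 0 installs a default C
 * handler instead (see trap_init() below).
 */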
#define KRNL            0x01
#define USER            0x02

#define COPROCESSOR(x)                                                  \
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER, fast_coprocessor }

typedef struct {
        int cause;
        int fast;
        void* handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

{ EXCCAUSE_ILLEGAL_INSTRUCTION, 0,         do_illegal_instruction},
{ EXCCAUSE_SYSTEM_CALL,         KRNL,      fast_syscall_kernel },
{ EXCCAUSE_SYSTEM_CALL,         USER,      fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,         0,         system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
/* EXCCAUSE_LOAD_STORE_ERROR unhandled */
{ EXCCAUSE_LEVEL1_INTERRUPT,    0,         do_interrupt },
{ EXCCAUSE_ALLOCA,              USER|KRNL, fast_alloca },
/* EXCCAUSE_INTEGER_DIVIDE_BY_ZERO unhandled */
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifdef CONFIG_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,           USER,      fast_unaligned },
#else
{ EXCCAUSE_UNALIGNED,           0,         do_unaligned_user },
#endif
{ EXCCAUSE_UNALIGNED,           KRNL,      fast_unaligned },
#endif
{ EXCCAUSE_ITLB_MISS,           0,         do_page_fault },
{ EXCCAUSE_ITLB_MISS,           USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_ITLB_MULTIHIT,       0,         do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,      0,         do_page_fault },
/* EXCCAUSE_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE, 0,       do_page_fault },
{ EXCCAUSE_DTLB_MISS,           USER|KRNL, fast_second_level_miss},
{ EXCCAUSE_DTLB_MISS,           0,         do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,       0,         do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,      0,         do_page_fault },
/* EXCCAUSE_DTLB_SIZE_RESTRICTION unhandled */
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, USER|KRNL, fast_store_prohibited },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE, 0,       do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE, 0,        do_page_fault },
/* EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
COPROCESSOR(0),
#endif
#if (XCHAL_CP_MASK & 2)
COPROCESSOR(1),
#endif
#if (XCHAL_CP_MASK & 4)
COPROCESSOR(2),
#endif
#if (XCHAL_CP_MASK & 8)
COPROCESSOR(3),
#endif
#if (XCHAL_CP_MASK & 16)
COPROCESSOR(4),
#endif
#if (XCHAL_CP_MASK & 32)
COPROCESSOR(5),
#endif
#if (XCHAL_CP_MASK & 64)
COPROCESSOR(6),
#endif
#if (XCHAL_CP_MASK & 128)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,        0,         do_debug },
{ -1, -1, 0 }

};
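/*
 * The table above is __initdata: trap_init() copies its entries into
 * exc_table below, and the init memory is freed after boot.
 */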

/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */

unsigned long exc_table[EXC_TABLE_SIZE/4];
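/*
 * Rough lookup sketch (the real dispatch is in assembly, see vectors.S):
 * the low-level exception vector uses EXCCAUSE to index one of the three
 * word-sized slot arrays that trap_init() fills in below, e.g.
 *
 *      handler = exc_table[EXC_TABLE_FAST_USER/4 + exccause];
 *
 * and jumps to it; causes without a fast handler get the default
 * dispatcher, which saves state and calls the registered C handler.
 */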

void die(const char*, struct pt_regs*, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs, unsigned long exccause)
{
        __die_if_kernel("Caught unhandled exception - should not happen",
                        regs, SIGKILL);

        /* If in user mode, send SIGILL signal to current process */
        printk("Caught unhandled exception in '%s' "
               "(pid = %d, pc = %#010lx) - should not happen\n"
               "\tEXCCAUSE is %ld\n",
               current->comm, task_pid_nr(current), regs->pc, exccause);
        force_sig(SIGILL, current);
}

/*
 * Multi-hit exception. This is fatal!
 */

void do_multihit(struct pt_regs *regs, unsigned long exccause)
{
        die("Caught multihit exception", regs, SIGKILL);
}

/*
 * Level-1 interrupt.
 * We currently have no priority encoding.
 */

unsigned long ignored_level1_interrupts;
extern void do_IRQ(int, struct pt_regs *);

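/*
 * Special registers used below: INTREAD returns the set of currently
 * pending interrupts, INTENABLE is the per-interrupt enable mask, and
 * writing a bit to INTCLEAR acknowledges the corresponding software or
 * edge-triggered interrupt (level-triggered sources are cleared at the
 * device itself).
 */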
void do_interrupt (struct pt_regs *regs)
{
        unsigned long intread = get_sr (INTREAD);
        unsigned long intenable = get_sr (INTENABLE);
        int i, mask;

        /* Handle all interrupts (no priorities).
         * (Clear the interrupt before processing, in case it's
         *  edge-triggered or software-generated)
         */

        for (i = 0, mask = 1; i < XCHAL_NUM_INTERRUPTS; i++, mask <<= 1) {
                if (mask & (intread & intenable)) {
                        set_sr (mask, INTCLEAR);
                        do_IRQ (i, regs);
                }
        }
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

void
do_illegal_instruction(struct pt_regs *regs)
{
        __die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

        /* If in user mode, send SIGILL signal to current process. */

        printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
            current->comm, task_pid_nr(current), regs->pc);
        force_sig(SIGILL, current);
}

/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_UNALIGNED_USER is not set, we don't allow unaligned memory
 * accesses from user space.
 */

#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION
#ifndef CONFIG_UNALIGNED_USER
void
do_unaligned_user (struct pt_regs *regs)
{
        siginfo_t info;

        __die_if_kernel("Unhandled unaligned exception in kernel",
                        regs, SIGKILL);

        current->thread.bad_vaddr = regs->excvaddr;
        current->thread.error_code = -3;
        printk("Unaligned memory access to %08lx in '%s' "
               "(pid = %d, pc = %#010lx)\n",
               regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void *) regs->excvaddr;
        force_sig_info(SIGBUS, &info, current);
}
#endif
#endif

void
do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_KGDB
        /* If remote debugging is configured AND enabled, we give control to
         * kgdb.  Otherwise, we fall through, perhaps giving control to the
         * native debugger.
         */

        if (gdb_enter) {
                extern void gdb_handle_exception(struct pt_regs *);
                gdb_handle_exception(regs);
                return_from_debug_flag = 1;
                return;
        }
#endif

        __die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

        /* If in user mode, send SIGTRAP signal to current process */

        force_sig(SIGTRAP, current);
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user          first dispatch table for user exceptions
 * - fast kernel        first dispatch table for kernel exceptions
 * - default C-handler  C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */

#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))

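/*
 * Example of the resulting layout: EXCCAUSE_DTLB_MISS appears twice in
 * dispatch_init_table, so fast_second_level_miss lands in both the
 * fast-user and fast-kernel slots for that cause while do_page_fault
 * lands in the default-C slot; the fast assembly path is tried first,
 * with the C page fault handler available as the slower fallback
 * (see vectors.S).
 */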
void __init trap_init(void)
{
        int i;

        /* Setup default vectors. */

        for (i = 0; i < 64; i++) {
                set_handler(EXC_TABLE_FAST_USER/4   + i, user_exception);
                set_handler(EXC_TABLE_FAST_KERNEL/4 + i, kernel_exception);
                set_handler(EXC_TABLE_DEFAULT/4 + i, do_unhandled);
        }

        /* Setup specific handlers. */

        for (i = 0; dispatch_init_table[i].cause >= 0; i++) {

                int fast = dispatch_init_table[i].fast;
                int cause = dispatch_init_table[i].cause;
                void *handler = dispatch_init_table[i].handler;

                if (fast == 0)
                        set_handler (EXC_TABLE_DEFAULT/4 + cause, handler);
                if (fast && fast & USER)
                        set_handler (EXC_TABLE_FAST_USER/4 + cause, handler);
                if (fast && fast & KRNL)
                        set_handler (EXC_TABLE_FAST_KERNEL/4 + cause, handler);
        }

        /* Initialize EXCSAVE_1 to hold the address of the exception table. */

        i = (unsigned long)exc_table;
        __asm__ __volatile__("wsr  %0, "__stringify(EXCSAVE_1)"\n" : : "a" (i));
}

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs * regs)
{
        int i, wmask;

        wmask = regs->wmask & ~1;

        for (i = 0; i < 32; i++) {
                if (wmask & (1 << (i / 4)))
                        break;
                if ((i % 8) == 0)
                        printk ("\n" KERN_INFO "a%02d: ", i);
                printk("%08lx ", regs->areg[i]);
        }
        printk("\n");

        printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
               regs->pc, regs->ps, regs->depc, regs->excvaddr);
        printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n",
               regs->lbeg, regs->lend, regs->lcount, regs->sar);
        if (user_mode(regs))
                printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
                       regs->windowbase, regs->windowstart, regs->wmask,
                       regs->syscall);
}

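/*
 * Kernel stack unwinder for the windowed ABI: spill_registers() first
 * forces the live register windows out to the stack.  Each frame's a0
 * (return address) and a1 (caller's stack pointer) then sit in the
 * 16-byte base save area just below that frame's stack pointer, which
 * is why the loop reads them at sp[-4] and sp[-3].  MAKE_PC_FROM_RA()
 * strips the window-increment bits from the return address to recover
 * a real kernel text address.
 */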
void show_trace(struct task_struct *task, unsigned long *sp)
{
        unsigned long a0, a1, pc;
        unsigned long sp_start, sp_end;

        a1 = (unsigned long)sp;

        if (a1 == 0)
                __asm__ __volatile__ ("mov %0, a1\n" : "=a"(a1));

        sp_start = a1 & ~(THREAD_SIZE-1);
        sp_end = sp_start + THREAD_SIZE;

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif
        spill_registers();

        while (a1 > sp_start && a1 < sp_end) {
                sp = (unsigned long*)a1;

                a0 = *(sp - 4);
                a1 = *(sp - 3);

                if (a1 <= (unsigned long) sp)
                        break;

                pc = MAKE_PC_FROM_RA(a0, a1);

                if (kernel_text_address(pc)) {
                        printk(" [<%08lx>] ", pc);
                        print_symbol("%s\n", pc);
                }
        }
        printk("\n");
}

/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
{
        int i = 0;
        unsigned long *stack;

        if (sp == 0)
                __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp));

        stack = sp;

        printk("\nStack: ");

        for (i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(sp))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n       ");
                printk("%08lx ", *sp++);
        }
        printk("\n");
        show_trace(task, stack);
}

void dump_stack(void)
{
        show_stack(current, NULL);
}

EXPORT_SYMBOL(dump_stack);

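/*
 * Dump the nine 32-bit words around the faulting PC; the word that
 * contains the PC itself is bracketed with '<' and '>'.
 */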
void show_code(unsigned int *pc)
{
        long i;

        printk("\nCode:");

        for (i = -3; i < 6; i++) {
                unsigned long insn;
                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in pc)\n");
                        break;
                }
                printk("%c%08lx%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
        }
}

DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;
        int nl = 0;

        console_verbose();
        spin_lock_irq(&die_lock);

        printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter);
#ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
        nl = 1;
#endif
        if (nl)
                printk("\n");
        show_regs(regs);
        if (!user_mode(regs))
                show_stack(NULL, (unsigned long*)regs->areg[1]);

        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(err);
}