linux/arch/sh/kernel/traps_32.c
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>

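/*
 * Trap numbers differ between CPU families: the SH-2/SH-2A parts use the
 * vector assignments below, while the other SH-3/SH-4 parts use 12 and 13
 * for reserved and illegal slot instructions.  trap_init() wires these
 * vectors up to the handlers further down in this file.
 */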
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST     4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR     9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_UBC              12
#  define TRAP_FPU_ERROR        13
#  define TRAP_DIVZERO_ERROR    17
#  define TRAP_DIVOVF_ERROR     18
# endif
#else
#define TRAP_RESERVED_INST      12
#define TRAP_ILLEGAL_SLOT_INST  13
#endif

static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
        unsigned long p;
        int i;

        printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);

        for (p = bottom & ~31; p < top; ) {
                printk("%04lx: ", p & 0xffff);

                for (i = 0; i < 8; i++, p += 4) {
                        unsigned int val;

                        if (p < bottom || p >= top)
                                printk("         ");
                        else {
                                if (__get_user(val, (unsigned int __user *)p)) {
                                        printk("\n");
                                        return;
                                }
                                printk("%08x ", val);
                        }
                }
                printk("\n");
        }
}

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        static int die_counter;

        oops_enter();

        spin_lock_irq(&die_lock);
        console_verbose();
        bust_spinlocks(1);

        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
        sysfs_printk_last_file();
        print_modules();
        show_regs(regs);

        printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
                        task_pid_nr(current), task_stack_page(current) + 1);

        if (!user_mode(regs) || in_interrupt())
                dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
                         (unsigned long)task_stack_page(current));

        notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);

        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
        oops_exit();

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs,
                                 long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

/*
 * try and fix up kernelspace address errors
 * - userspace errors just cause EFAULT to be returned, resulting in SEGV
 * - kernel/userspace interfaces cause a jump to an appropriate handler
 * - other kernel errors are bad
 */
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs)) {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->pc);
                if (fixup) {
                        regs->pc = fixup->fixup;
                        return;
                }

                die(str, regs, err);
        }
}

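/*
 * Worked example: on a little-endian CPU a one-byte load of 0x80 leaves the
 * zero-filled register slot as {0x80, 0x00, 0x00, 0x00}; sign_extend() then
 * sets the upper three bytes to 0xff so the register reads back as
 * 0xffffff80, matching what a real mov.b would have produced.
 */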
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
        if ((count == 1) && dst[0] & 0x80) {
                dst[1] = 0xff;
                dst[2] = 0xff;
                dst[3] = 0xff;
        }
        if ((count == 2) && dst[1] & 0x80) {
                dst[2] = 0xff;
                dst[3] = 0xff;
        }
#else
        if ((count == 1) && dst[3] & 0x80) {
                dst[2] = 0xff;
                dst[1] = 0xff;
                dst[0] = 0xff;
        }
        if ((count == 2) && dst[2] & 0x80) {
                dst[1] = 0xff;
                dst[0] = 0xff;
        }
#endif
}

static struct mem_access user_mem_access = {
        copy_from_user,
        copy_to_user,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
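/*
 * Operand decoding, for reference: bits 11..8 of the opcode select Rn and
 * bits 7..4 select Rm; for the register-indirect mov.[bwl] forms the low
 * two bits encode the access size, hence count = 1 << (instruction & 3).
 * E.g. 0x6646 ("mov.l @r4+,r6") decodes to rn = r6, rm = r4, count = 4,
 * and bit 2 selects the post-increment of r4.
 */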
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
                                struct mem_access *ma)
{
        int ret, index, count;
        unsigned long *rm, *rn;
        unsigned char *src, *dst;
        unsigned char __user *srcu, *dstu;

        index = (instruction>>8)&15;    /* 0x0F00 */
        rn = &regs->regs[index];

        index = (instruction>>4)&15;    /* 0x00F0 */
        rm = &regs->regs[index];

        count = 1<<(instruction&3);

        switch (count) {
        case 1: inc_unaligned_byte_access(); break;
        case 2: inc_unaligned_word_access(); break;
        case 4: inc_unaligned_dword_access(); break;
        case 8: inc_unaligned_multi_access(); break;
        }

        ret = -EFAULT;
        switch (instruction>>12) {
        case 0: /* mov.[bwl] to/from memory via r0+rn */
                if (instruction & 8) {
                        /* from memory */
                        srcu = (unsigned char __user *)*rm;
                        srcu += regs->regs[0];
                        dst = (unsigned char *)rn;
                        *(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                        dst += 4-count;
#endif
                        if (ma->from(dst, srcu, count))
                                goto fetch_fault;

                        sign_extend(count, dst);
                } else {
                        /* to memory */
                        src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
                        src += 4-count;
#endif
                        dstu = (unsigned char __user *)*rn;
                        dstu += regs->regs[0];

                        if (ma->to(dstu, src, count))
                                goto fetch_fault;
                }
                ret = 0;
                break;

        case 1: /* mov.l Rm,@(disp,Rn) */
                src = (unsigned char*) rm;
                dstu = (unsigned char __user *)*rn;
                dstu += (instruction&0x000F)<<2;

                if (ma->to(dstu, src, 4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
                if (instruction & 4)
                        *rn -= count;
                src = (unsigned char*) rm;
                dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
                src += 4-count;
#endif
                if (ma->to(dstu, src, count))
                        goto fetch_fault;
                ret = 0;
                break;

        case 5: /* mov.l @(disp,Rm),Rn */
                srcu = (unsigned char __user *)*rm;
                srcu += (instruction & 0x000F) << 2;
                dst = (unsigned char *)rn;
                *(unsigned long *)dst = 0;

                if (ma->from(dst, srcu, 4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 6: /* mov.[bwl] from memory, possibly with post-increment */
                srcu = (unsigned char __user *)*rm;
                if (instruction & 4)
                        *rm += count;
                dst = (unsigned char*) rn;
                *(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                dst += 4-count;
#endif
                if (ma->from(dst, srcu, count))
                        goto fetch_fault;
                sign_extend(count, dst);
                ret = 0;
                break;

        case 8:
                switch ((instruction&0xFF00)>>8) {
                case 0x81: /* mov.w R0,@(disp,Rn) */
                        src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
                        src += 2;
#endif
                        dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
                        dstu += (instruction & 0x000F) << 1;

                        if (ma->to(dstu, src, 2))
                                goto fetch_fault;
                        ret = 0;
                        break;

                case 0x85: /* mov.w @(disp,Rm),R0 */
                        srcu = (unsigned char __user *)*rm;
                        srcu += (instruction & 0x000F) << 1;
                        dst = (unsigned char *) &regs->regs[0];
                        *(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                        dst += 2;
#endif
                        if (ma->from(dst, srcu, 2))
                                goto fetch_fault;
                        sign_extend(2, dst);
                        ret = 0;
                        break;
                }
                break;
        }
        return ret;

 fetch_fault:
        /* Argh. Address not only misaligned but also non-existent.
         * Raise an EFAULT and see if it's trapped
         */
        die_if_no_fixup("Fault in unaligned fixup", regs, 0);
        return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
                                   insn_size_t old_instruction,
                                   struct mem_access *ma)
{
        insn_size_t instruction;
        void __user *addr = (void __user *)(regs->pc +
                instruction_size(old_instruction));

        if (copy_from_user(&instruction, addr, sizeof(instruction))) {
                /* the instruction-fetch faulted */
                if (user_mode(regs))
                        return -EFAULT;

                /* kernel */
                die("delay-slot-insn faulting in handle_unaligned_delayslot",
                    regs, 0);
        }

        return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
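/*
 * Worked example: "bra" 0xA004 has a 12-bit displacement of 4, so
 * ((signed short)(0xA004 << 4)) >> 3 yields 4*2 = 8 and the macro returns
 * 8 + 4 = 12, i.e. the branch target is PC + 12.  For "bf" 0x8BFC the
 * 8-bit displacement is -4, giving (-4 * 2) + 4 = -4 relative to PC.
 */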

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
                            struct mem_access *ma, int expected,
                            unsigned long address)
{
        u_int rm;
        int ret, index;

        /*
         * XXX: We can't handle mixed 16/32-bit instructions yet
         */
        if (instruction_size(instruction) != 2)
                return -EINVAL;

        index = (instruction>>8)&15;    /* 0x0F00 */
        rm = regs->regs[index];

        /*
         * Log the unexpected fixups, and then pass them on to perf.
         *
         * We intentionally don't report the expected cases to perf as
         * otherwise the trapped I/O case will skew the results too much
         * to be useful.
         */
        if (!expected) {
                unaligned_fixups_notify(current, instruction, regs);
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
                              regs, address);
        }

        ret = -EFAULT;
        switch (instruction&0xF000) {
        case 0x0000:
                if (instruction==0x000B) {
                        /* rts */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc = regs->pr;
                }
                else if ((instruction&0x00FF)==0x0023) {
                        /* braf @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc += rm + 4;
                }
                else if ((instruction&0x00FF)==0x0003) {
                        /* bsrf @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc += rm + 4;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x1000: /* mov.l Rm,@(disp,Rn) */
                goto simple;

        case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
                goto simple;

        case 0x4000:
                if ((instruction&0x00FF)==0x002B) {
                        /* jmp @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc = rm;
                }
                else if ((instruction&0x00FF)==0x000B) {
                        /* jsr @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc = rm;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x5000: /* mov.l @(disp,Rm),Rn */
                goto simple;

        case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
                goto simple;

        case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
                switch (instruction&0x0F00) {
                case 0x0100: /* mov.w R0,@(disp,Rm) */
                        goto simple;
                case 0x0500: /* mov.w @(disp,Rm),R0 */
                        goto simple;
                case 0x0B00: /* bf   lab - no delayslot*/
                        break;
                case 0x0F00: /* bf/s lab */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
                                if ((regs->sr & 0x00000001) != 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                case 0x0900: /* bt   lab - no delayslot */
                        break;
                case 0x0D00: /* bt/s lab */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
                                if ((regs->sr & 0x00000001) == 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                }
                break;

        case 0xA000: /* bra label */
                ret = handle_delayslot(regs, instruction, ma);
                if (ret==0)
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                break;

        case 0xB000: /* bsr label */
                ret = handle_delayslot(regs, instruction, ma);
                if (ret==0) {
                        regs->pr = regs->pc + 4;
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                }
                break;
        }
        return ret;

        /* handle non-delay-slot instruction */
 simple:
        ret = handle_unaligned_ins(instruction, regs, ma);
        if (ret==0)
                regs->pc += instruction_size(instruction);
        return ret;
}

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address error
 * and data address errors caused by read accesses.
 */
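/*
 * For example, "mov.w r1,@r3" with an odd address in r3 raises a data
 * address error and ends up here.  In user mode the per-process policy
 * returned by unaligned_user_action() then decides whether the access is
 * silently skipped, emulated via handle_unaligned_access(), or answered
 * with SIGBUS.
 */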
asmlinkage void do_address_error(struct pt_regs *regs,
                                 unsigned long writeaccess,
                                 unsigned long address)
{
        unsigned long error_code = 0;
        mm_segment_t oldfs;
        siginfo_t info;
        insn_size_t instruction;
        int tmp;

        /* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
        error_code = lookup_exception_vector();
#endif

        oldfs = get_fs();

        if (user_mode(regs)) {
                int si_code = BUS_ADRERR;
                unsigned int user_action;

                local_irq_enable();
                inc_unaligned_user_access();

                set_fs(USER_DS);
                if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
                                   sizeof(instruction))) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                set_fs(oldfs);

                /* shout about userspace fixups */
                unaligned_fixups_notify(current, instruction, regs);

                user_action = unaligned_user_action();
                if (user_action & UM_FIXUP)
                        goto fixup;
                if (user_action & UM_SIGNAL)
                        goto uspace_segv;
                else {
                        /* ignore */
                        regs->pc += instruction_size(instruction);
                        return;
                }

fixup:
                /* bad PC is not something we can fix */
                if (regs->pc & 1) {
                        si_code = BUS_ADRALN;
                        goto uspace_segv;
                }

                set_fs(USER_DS);
                tmp = handle_unaligned_access(instruction, regs,
                                              &user_mem_access, 0,
                                              address);
                set_fs(oldfs);

                if (tmp == 0)
                        return; /* sorted */
uspace_segv:
                printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
                       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
                       regs->pr);

                info.si_signo = SIGBUS;
                info.si_errno = 0;
                info.si_code = si_code;
                info.si_addr = (void __user *)address;
                force_sig_info(SIGBUS, &info, current);
        } else {
                inc_unaligned_kernel_access();

                if (regs->pc & 1)
                        die("unaligned program counter", regs, error_code);

                set_fs(KERNEL_DS);
                if (copy_from_user(&instruction, (void __user *)(regs->pc),
                                   sizeof(instruction))) {
                        /*
                         * Argh. Fault on the instruction itself.
                         * This should never happen on non-SMP.
                         */
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }

                unaligned_fixups_notify(current, instruction, regs);

                handle_unaligned_access(instruction, regs, &user_mem_access,
                                        0, address);
                set_fs(oldfs);
        }
}

#ifdef CONFIG_SH_DSP
/*
 *      SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
        unsigned short inst = 0;

        /*
         * Safe guard if DSP mode is already enabled or we're lacking
         * the DSP altogether.
         */
        if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
                return 0;

        get_user(inst, ((unsigned short *) regs->pc));

        inst &= 0xf000;

        /* Check for any type of DSP or support instruction */
        if ((inst == 0xf000) || (inst == 0x4000))
                return 1;

        return 0;
}
#else
#define is_dsp_inst(regs)       (0)
#endif /* CONFIG_SH_DSP */

#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
                                struct pt_regs __regs)
{
        /* Zero-initialize so uninitialized siginfo fields can't reach userspace. */
        siginfo_t info = { .si_signo = SIGFPE };

        switch (r4) {
        case TRAP_DIVZERO_ERROR:
                info.si_code = FPE_INTDIV;
                break;
        case TRAP_DIVOVF_ERROR:
                info.si_code = FPE_INTOVF;
                break;
        }

        force_sig_info(SIGFPE, &info, current);
}
#endif

asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
                                struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        unsigned long error_code;
        struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
        unsigned short inst = 0;
        int err;

        get_user(inst, (unsigned short*)regs->pc);

        err = do_fpu_inst(inst, regs);
        if (!err) {
                regs->pc += instruction_size(inst);
                return;
        }
        /* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
        /* Check if it's a DSP instruction */
        if (is_dsp_inst(regs)) {
                /* Enable DSP mode, and restart instruction. */
                regs->sr |= SR_DSP;
                /* Save DSP mode */
                tsk->thread.dsp_status.status |= SR_DSP;
                return;
        }
#endif

        error_code = lookup_exception_vector();

        local_irq_enable();
        force_sig(SIGILL, tsk);
        die_if_no_fixup("reserved instruction", regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
        /*
         * bfs: 8fxx: PC+=d*2+4;
         * bts: 8dxx: PC+=d*2+4;
         * bra: axxx: PC+=D*2+4;
         * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
         * braf:0x23: PC+=Rn*2+4;
         * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
         * jmp: 4x2b: PC=Rn;
         * jsr: 4x0b: PC=Rn      after PR=PC+4;
         * rts: 000b: PC=PR;
         */
        if (((inst & 0xf000) == 0xb000)  ||     /* bsr */
            ((inst & 0xf0ff) == 0x0003)  ||     /* bsrf */
            ((inst & 0xf0ff) == 0x400b))        /* jsr */
                regs->pr = regs->pc + 4;

        if ((inst & 0xfd00) == 0x8d00) {        /* bfs, bts */
                regs->pc += SH_PC_8BIT_OFFSET(inst);
                return 0;
        }

        if ((inst & 0xe000) == 0xa000) {        /* bra, bsr */
                regs->pc += SH_PC_12BIT_OFFSET(inst);
                return 0;
        }

        if ((inst & 0xf0df) == 0x0003) {        /* braf, bsrf */
                regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
                return 0;
        }

        if ((inst & 0xf0df) == 0x400b) {        /* jmp, jsr */
                regs->pc = regs->regs[(inst & 0x0f00) >> 8];
                return 0;
        }

        if ((inst & 0xffff) == 0x000b) {        /* rts */
                regs->pc = regs->pr;
                return 0;
        }

        return 1;
}
#endif

asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
                                unsigned long r6, unsigned long r7,
                                struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        unsigned long inst;
        struct task_struct *tsk = current;

        if (kprobe_handle_illslot(regs->pc) == 0)
                return;

#ifdef CONFIG_SH_FPU_EMU
        get_user(inst, (unsigned short *)regs->pc + 1);
        if (!do_fpu_inst(inst, regs)) {
                get_user(inst, (unsigned short *)regs->pc);
                if (!emulate_branch(inst, regs))
                        return;
                /* fault in branch.*/
        }
        /* not a FPU inst. */
#endif

        inst = lookup_exception_vector();

        local_irq_enable();
        force_sig(SIGILL, tsk);
        die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
                                   unsigned long r6, unsigned long r7,
                                   struct pt_regs __regs)
{
        struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
        long ex;

        ex = lookup_exception_vector();
        die_if_kernel("exception", regs, ex);
}

void __cpuinit per_cpu_trap_init(void)
{
        extern void *vbr_base;

        /* NOTE: The VBR value should be in P1
           (or P2, the virtual "fixed" address space).
           It should definitely not be a physical address.  */

        asm volatile("ldc       %0, vbr"
                     : /* no output */
                     : "r" (&vbr_base)
                     : "memory");

        /* disable exception blocking now that the VBR has been set up */
        clear_bl_bit();
}

void *set_exception_table_vec(unsigned int vec, void *handler)
{
        extern void *exception_handling_table[];
        void *old_handler;

        old_handler = exception_handling_table[vec];
        exception_handling_table[vec] = handler;
        return old_handler;
}

void __init trap_init(void)
{
        set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
        set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
        /*
         * For SH-4 lacking an FPU, treat floating point instructions as
         * reserved. They'll be handled in the math-emu case, or faulted on
         * otherwise.
         */
        set_exception_table_evt(0x800, do_reserved_inst);
        set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
        set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
        set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif

#ifdef CONFIG_CPU_SH2
        set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
        set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
        set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
        set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
        set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        unsigned long stack;

        if (!tsk)
                tsk = current;
        if (tsk == current)
                sp = (unsigned long *)current_stack_pointer;
        else
                sp = (unsigned long *)tsk->thread.sp;

        stack = (unsigned long)sp;
        dump_mem("Stack: ", stack, THREAD_SIZE +
                 (unsigned long)task_stack_page(tsk));
        show_trace(tsk, sp, NULL);
}

void dump_stack(void)
{
        show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);