linux/arch/sh/kernel/traps_32.c
// SPDX-License-Identifier: GPL-2.0
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>

#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>

#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST     4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR     9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_UBC              12
#  define TRAP_FPU_ERROR        13
#  define TRAP_DIVZERO_ERROR    17
#  define TRAP_DIVOVF_ERROR     18
# endif
#else
#define TRAP_RESERVED_INST      12
#define TRAP_ILLEGAL_SLOT_INST  13
#endif

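/*
 * The unaligned-load emulation below zero-fills the destination register
 * image and then copies only 'count' bytes into it, so mov.b/mov.w loads
 * need the remaining bytes filled with 0xff by hand when the loaded value
 * is negative.  The byte indices differ between little- and big-endian
 * register layouts, hence the two variants.
 */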
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
        if ((count == 1) && dst[0] & 0x80) {
                dst[1] = 0xff;
                dst[2] = 0xff;
                dst[3] = 0xff;
        }
        if ((count == 2) && dst[1] & 0x80) {
                dst[2] = 0xff;
                dst[3] = 0xff;
        }
#else
        if ((count == 1) && dst[3] & 0x80) {
                dst[2] = 0xff;
                dst[1] = 0xff;
                dst[0] = 0xff;
        }
        if ((count == 2) && dst[2] & 0x80) {
                dst[1] = 0xff;
                dst[0] = 0xff;
        }
#endif
}

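/*
 * Default accessors used when fixing up a fault on an ordinary user-space
 * address; handle_unaligned_access() also accepts alternative accessors
 * (e.g. for the trapped I/O case) via its 'ma' argument.
 */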
static struct mem_access user_mem_access = {
        copy_from_user,
        copy_to_user,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
                                struct mem_access *ma)
{
        int ret, index, count;
        unsigned long *rm, *rn;
        unsigned char *src, *dst;
        unsigned char __user *srcu, *dstu;

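        /*
         * All of the forms handled here are 16-bit opcodes with the
         * destination register number in bits 11:8, the source register
         * in bits 7:4 and, for the mov.[bwl] groups, the operand size
         * encoded in the low bits (count = 1, 2 or 4 bytes).
         */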
        index = (instruction>>8)&15;    /* 0x0F00 */
        rn = &regs->regs[index];

        index = (instruction>>4)&15;    /* 0x00F0 */
        rm = &regs->regs[index];

        count = 1<<(instruction&3);

        switch (count) {
        case 1: inc_unaligned_byte_access(); break;
        case 2: inc_unaligned_word_access(); break;
        case 4: inc_unaligned_dword_access(); break;
        case 8: inc_unaligned_multi_access(); break;
        }

        ret = -EFAULT;
        switch (instruction>>12) {
        case 0: /* mov.[bwl] to/from memory via r0+rn */
                if (instruction & 8) {
                        /* from memory */
                        srcu = (unsigned char __user *)*rm;
                        srcu += regs->regs[0];
                        dst = (unsigned char *)rn;
                        *(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                        dst += 4-count;
#endif
                        if (ma->from(dst, srcu, count))
                                goto fetch_fault;

                        sign_extend(count, dst);
                } else {
                        /* to memory */
                        src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
                        src += 4-count;
#endif
                        dstu = (unsigned char __user *)*rn;
                        dstu += regs->regs[0];

                        if (ma->to(dstu, src, count))
                                goto fetch_fault;
                }
                ret = 0;
                break;

        case 1: /* mov.l Rm,@(disp,Rn) */
                src = (unsigned char*) rm;
                dstu = (unsigned char __user *)*rn;
                dstu += (instruction&0x000F)<<2;

                if (ma->to(dstu, src, 4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
                if (instruction & 4)
                        *rn -= count;
                src = (unsigned char*) rm;
                dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
                src += 4-count;
#endif
                if (ma->to(dstu, src, count))
                        goto fetch_fault;
                ret = 0;
                break;

        case 5: /* mov.l @(disp,Rm),Rn */
                srcu = (unsigned char __user *)*rm;
                srcu += (instruction & 0x000F) << 2;
                dst = (unsigned char *)rn;
                *(unsigned long *)dst = 0;

                if (ma->from(dst, srcu, 4))
                        goto fetch_fault;
                ret = 0;
                break;

        case 6: /* mov.[bwl] from memory, possibly with post-increment */
                srcu = (unsigned char __user *)*rm;
                if (instruction & 4)
                        *rm += count;
                dst = (unsigned char*) rn;
                *(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                dst += 4-count;
#endif
                if (ma->from(dst, srcu, count))
                        goto fetch_fault;
                sign_extend(count, dst);
                ret = 0;
                break;

        case 8:
                switch ((instruction&0xFF00)>>8) {
                case 0x81: /* mov.w R0,@(disp,Rn) */
                        src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
                        src += 2;
#endif
                        dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
                        dstu += (instruction & 0x000F) << 1;

                        if (ma->to(dstu, src, 2))
                                goto fetch_fault;
                        ret = 0;
                        break;

                case 0x85: /* mov.w @(disp,Rm),R0 */
                        srcu = (unsigned char __user *)*rm;
                        srcu += (instruction & 0x000F) << 1;
                        dst = (unsigned char *) &regs->regs[0];
                        *(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                        dst += 2;
#endif
                        if (ma->from(dst, srcu, 2))
                                goto fetch_fault;
                        sign_extend(2, dst);
                        ret = 0;
                        break;
                }
                break;

        case 9: /* mov.w @(disp,PC),Rn */
                srcu = (unsigned char __user *)regs->pc;
                srcu += 4;
                srcu += (instruction & 0x00FF) << 1;
                dst = (unsigned char *)rn;
                *(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
                dst += 2;
#endif

                if (ma->from(dst, srcu, 2))
                        goto fetch_fault;
                sign_extend(2, dst);
                ret = 0;
                break;

        case 0xd: /* mov.l @(disp,PC),Rn */
                srcu = (unsigned char __user *)(regs->pc & ~0x3);
                srcu += 4;
                srcu += (instruction & 0x00FF) << 2;
                dst = (unsigned char *)rn;
                *(unsigned long *)dst = 0;

                if (ma->from(dst, srcu, 4))
                        goto fetch_fault;
                ret = 0;
                break;
        }
        return ret;

 fetch_fault:
        /* Argh. Address not only misaligned but also non-existent.
         * Raise an EFAULT and see if it's trapped
         */
        die_if_no_fixup("Fault in unaligned fixup", regs, 0);
        return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
                                   insn_size_t old_instruction,
                                   struct mem_access *ma)
{
        insn_size_t instruction;
        void __user *addr = (void __user *)(regs->pc +
                instruction_size(old_instruction));

        if (copy_from_user(&instruction, addr, sizeof(instruction))) {
                /* the instruction-fetch faulted */
                if (user_mode(regs))
                        return -EFAULT;

                /* kernel */
                die("delay-slot-insn faulting in handle_unaligned_delayslot",
                    regs, 0);
        }

        return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
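/*
 * In the 12-bit case the branch displacement sits in bits 11:0 of the
 * opcode; shifting left by 4 pushes the opcode nibble out of the signed
 * 16-bit intermediate and puts the displacement sign bit in bit 15, and
 * the arithmetic shift right by 3 then sign-extends while multiplying
 * by 2 (instructions are 2 bytes).  The +4 reflects that the displacement
 * is relative to the branch address + 4.  E.g. a bra with displacement
 * -2 (0xFFE): ((signed short)0xFFE0 >> 3) + 4 = -4 + 4 = 0.
 */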

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
                            struct mem_access *ma, int expected,
                            unsigned long address)
{
        u_int rm;
        int ret, index;

        /*
         * XXX: We can't handle mixed 16/32-bit instructions yet
         */
        if (instruction_size(instruction) != 2)
                return -EINVAL;

        index = (instruction>>8)&15;    /* 0x0F00 */
        rm = regs->regs[index];

        /*
         * Log the unexpected fixups, and then pass them on to perf.
         *
         * We intentionally don't report the expected cases to perf as
         * otherwise the trapped I/O case will skew the results too much
         * to be useful.
         */
        if (!expected) {
                unaligned_fixups_notify(current, instruction, regs);
                perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
                              regs, address);
        }

        ret = -EFAULT;
        switch (instruction&0xF000) {
        case 0x0000:
                if (instruction==0x000B) {
                        /* rts */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc = regs->pr;
                }
                else if ((instruction&0x00FF)==0x0023) {
                        /* braf @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc += rm + 4;
                }
                else if ((instruction&0x00FF)==0x0003) {
                        /* bsrf @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc += rm + 4;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x1000: /* mov.l Rm,@(disp,Rn) */
                goto simple;

        case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
                goto simple;

        case 0x4000:
                if ((instruction&0x00FF)==0x002B) {
                        /* jmp @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0)
                                regs->pc = rm;
                }
                else if ((instruction&0x00FF)==0x000B) {
                        /* jsr @Rm */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
                                regs->pr = regs->pc + 4;
                                regs->pc = rm;
                        }
                }
                else {
                        /* mov.[bwl] to/from memory via r0+rn */
                        goto simple;
                }
                break;

        case 0x5000: /* mov.l @(disp,Rm),Rn */
                goto simple;

        case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
                goto simple;

        case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
                switch (instruction&0x0F00) {
                case 0x0100: /* mov.w R0,@(disp,Rm) */
                        goto simple;
                case 0x0500: /* mov.w @(disp,Rm),R0 */
                        goto simple;
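                /*
                 * SR bit 0 is the T flag: bf/bf.s branch when T == 0,
                 * bt/bt.s when T == 1.  Where PC points at the branch
                 * itself (see the SH-3/SH-4 note above), a not-taken
                 * bf/s or bt/s simply steps past the branch and its
                 * delay slot (4 bytes); the taken case applies the
                 * 8-bit PC-relative offset after emulating the slot.
                 */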
                case 0x0B00: /* bf   lab - no delayslot */
                        ret = 0;
                        break;
                case 0x0F00: /* bf/s lab */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
                                if ((regs->sr & 0x00000001) != 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                case 0x0900: /* bt   lab - no delayslot */
                        ret = 0;
                        break;
                case 0x0D00: /* bt/s lab */
                        ret = handle_delayslot(regs, instruction, ma);
                        if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
                                if ((regs->sr & 0x00000001) == 0)
                                        regs->pc += 4; /* next after slot */
                                else
#endif
                                        regs->pc += SH_PC_8BIT_OFFSET(instruction);
                        }
                        break;
                }
                break;

        case 0x9000: /* mov.w @(disp,Rm),Rn */
                goto simple;

        case 0xA000: /* bra label */
                ret = handle_delayslot(regs, instruction, ma);
                if (ret==0)
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                break;

        case 0xB000: /* bsr label */
                ret = handle_delayslot(regs, instruction, ma);
                if (ret==0) {
                        regs->pr = regs->pc + 4;
                        regs->pc += SH_PC_12BIT_OFFSET(instruction);
                }
                break;

        case 0xD000: /* mov.l @(disp,Rm),Rn */
                goto simple;
        }
        return ret;

        /* handle non-delay-slot instruction */
 simple:
        ret = handle_unaligned_ins(instruction, regs, ma);
        if (ret==0)
                regs->pc += instruction_size(instruction);
        return ret;
}

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address error
 * and data address errors caused by read accesses.
 */
asmlinkage void do_address_error(struct pt_regs *regs,
                                 unsigned long writeaccess,
                                 unsigned long address)
{
        unsigned long error_code = 0;
        mm_segment_t oldfs;
        insn_size_t instruction;
        int tmp;

        /* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
        error_code = lookup_exception_vector();
#endif

        oldfs = get_fs();

        if (user_mode(regs)) {
                int si_code = BUS_ADRERR;
                unsigned int user_action;

                local_irq_enable();
                inc_unaligned_user_access();

                set_fs(USER_DS);
                if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
                                   sizeof(instruction))) {
                        set_fs(oldfs);
                        goto uspace_segv;
                }
                set_fs(oldfs);

                /* shout about userspace fixups */
                unaligned_fixups_notify(current, instruction, regs);

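                /*
                 * user_action carries the unaligned-access policy bits
                 * (such as UM_FIXUP and UM_SIGNAL, see asm/alignment.h);
                 * it decides below whether we fix the access up, deliver
                 * a signal, or silently skip the instruction.
                 */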
                user_action = unaligned_user_action();
                if (user_action & UM_FIXUP)
                        goto fixup;
                if (user_action & UM_SIGNAL)
                        goto uspace_segv;
                else {
                        /* ignore */
                        regs->pc += instruction_size(instruction);
                        return;
                }

fixup:
                /* bad PC is not something we can fix */
                if (regs->pc & 1) {
                        si_code = BUS_ADRALN;
                        goto uspace_segv;
                }

                set_fs(USER_DS);
                tmp = handle_unaligned_access(instruction, regs,
                                              &user_mem_access, 0,
                                              address);
                set_fs(oldfs);

                if (tmp == 0)
                        return; /* sorted */
uspace_segv:
                printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
                       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
                       regs->pr);

                force_sig_fault(SIGBUS, si_code, (void __user *)address);
        } else {
                inc_unaligned_kernel_access();

                if (regs->pc & 1)
                        die("unaligned program counter", regs, error_code);

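                /*
                 * The faulting PC is a kernel address, so temporarily
                 * widen the address limit with set_fs(KERNEL_DS) so that
                 * copy_from_user() can fetch the opcode before attempting
                 * the same fixup path.
                 */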
                set_fs(KERNEL_DS);
                if (copy_from_user(&instruction, (void __user *)(regs->pc),
                                   sizeof(instruction))) {
                        /* Argh. Fault on the instruction itself.
                           This should never happen on non-SMP.
                        */
                        set_fs(oldfs);
                        die("insn faulting in do_address_error", regs, 0);
                }

                unaligned_fixups_notify(current, instruction, regs);

                handle_unaligned_access(instruction, regs, &user_mem_access,
                                        0, address);
                set_fs(oldfs);
        }
}

#ifdef CONFIG_SH_DSP
/*
 *      SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
        unsigned short inst = 0;

        /*
         * Safeguard if DSP mode is already enabled or we're lacking
         * the DSP altogether.
         */
        if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
                return 0;

        get_user(inst, ((unsigned short *) regs->pc));

        inst &= 0xf000;

        /* Check for any type of DSP or support instruction */
        if ((inst == 0xf000) || (inst == 0x4000))
                return 1;

        return 0;
}
#else
#define is_dsp_inst(regs)       (0)
#endif /* CONFIG_SH_DSP */

#ifdef CONFIG_CPU_SH2A
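/*
 * SH-2A integer division raises dedicated divide-by-zero and overflow
 * traps (TRAP_DIVZERO_ERROR/TRAP_DIVOVF_ERROR, wired up in trap_init()
 * below); both are reported to the offending task as SIGFPE.
 */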
asmlinkage void do_divide_error(unsigned long r4)
{
        int code;

        switch (r4) {
        case TRAP_DIVZERO_ERROR:
                code = FPE_INTDIV;
                break;
        case TRAP_DIVOVF_ERROR:
                code = FPE_INTOVF;
                break;
        default:
                /* Let gcc know unhandled cases don't make it past here */
                return;
        }
        force_sig_fault(SIGFPE, code, NULL);
}
#endif

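/*
 * Reserved-instruction traps end up here.  Before declaring the opcode
 * illegal we give the FPU emulator (CONFIG_SH_FPU_EMU) a chance to handle
 * it, and on DSP-capable parts we transparently enable DSP mode and
 * restart instructions that merely need SR.DSP set.
 */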
asmlinkage void do_reserved_inst(void)
{
        struct pt_regs *regs = current_pt_regs();
        unsigned long error_code;

#ifdef CONFIG_SH_FPU_EMU
        unsigned short inst = 0;
        int err;

        get_user(inst, (unsigned short*)regs->pc);

        err = do_fpu_inst(inst, regs);
        if (!err) {
                regs->pc += instruction_size(inst);
                return;
        }
        /* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
        /* Check if it's a DSP instruction */
        if (is_dsp_inst(regs)) {
                /* Enable DSP mode, and restart instruction. */
                regs->sr |= SR_DSP;
                /* Save DSP mode */
                current->thread.dsp_status.status |= SR_DSP;
                return;
        }
#endif

        error_code = lookup_exception_vector();

        local_irq_enable();
        force_sig(SIGILL);
        die_if_no_fixup("reserved instruction", regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
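/*
 * When the FPU emulator handles an instruction sitting in a branch delay
 * slot (see do_illegal_slot_inst() below), the branch itself still has to
 * be carried out by hand; emulate_branch() updates PC/PR accordingly and
 * returns 0 on success, 1 for anything it does not recognise.
 */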
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
        /*
         * bfs: 8fxx: PC+=d*2+4;
         * bts: 8dxx: PC+=d*2+4;
         * bra: axxx: PC+=D*2+4;
         * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
         * braf:0x23: PC+=Rn+4;
         * bsrf:0x03: PC+=Rn+4   after PR=PC+4;
         * jmp: 4x2b: PC=Rn;
         * jsr: 4x0b: PC=Rn      after PR=PC+4;
         * rts: 000b: PC=PR;
         */
        if (((inst & 0xf000) == 0xb000)  ||     /* bsr */
            ((inst & 0xf0ff) == 0x0003)  ||     /* bsrf */
            ((inst & 0xf0ff) == 0x400b))        /* jsr */
                regs->pr = regs->pc + 4;

        if ((inst & 0xfd00) == 0x8d00) {        /* bfs, bts */
                regs->pc += SH_PC_8BIT_OFFSET(inst);
                return 0;
        }

        if ((inst & 0xe000) == 0xa000) {        /* bra, bsr */
                regs->pc += SH_PC_12BIT_OFFSET(inst);
                return 0;
        }

        if ((inst & 0xf0df) == 0x0003) {        /* braf, bsrf */
                regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
                return 0;
        }

        if ((inst & 0xf0df) == 0x400b) {        /* jmp, jsr */
                regs->pc = regs->regs[(inst & 0x0f00) >> 8];
                return 0;
        }

        if ((inst & 0xffff) == 0x000b) {        /* rts */
                regs->pc = regs->pr;
                return 0;
        }

        return 1;
}
#endif

asmlinkage void do_illegal_slot_inst(void)
{
        struct pt_regs *regs = current_pt_regs();
        unsigned long inst;

        if (kprobe_handle_illslot(regs->pc) == 0)
                return;

#ifdef CONFIG_SH_FPU_EMU
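        /*
         * regs->pc points at the branch whose delay slot faulted; the
         * slot instruction itself lives at pc + 2, hence the "+ 1" on
         * the unsigned short pointer.  If the FPU emulator accepts it,
         * the branch is then replayed by emulate_branch() above.
         */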
        get_user(inst, (unsigned short *)regs->pc + 1);
        if (!do_fpu_inst(inst, regs)) {
                get_user(inst, (unsigned short *)regs->pc);
                if (!emulate_branch(inst, regs))
                        return;
                /* fault in branch. */
        }
        /* not a FPU inst. */
#endif

        inst = lookup_exception_vector();

        local_irq_enable();
        force_sig(SIGILL);
        die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(void)
{
        long ex;

        ex = lookup_exception_vector();
        die_if_kernel("exception", current_pt_regs(), ex);
}

void per_cpu_trap_init(void)
{
        extern void *vbr_base;

        /* NOTE: The VBR value should be a P1 address
           (or P2, the virtual "fixed" address space).
           It should definitely not be a physical address.  */

        asm volatile("ldc       %0, vbr"
                     : /* no output */
                     : "r" (&vbr_base)
                     : "memory");

        /* disable exception blocking now that the VBR has been set up */
        clear_bl_bit();
}

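/*
 * exception_handling_table is the dispatch table consulted by the
 * low-level exception entry code; this swaps in a new handler for a
 * vector and hands back the old one so callers can restore it later.
 */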
void *set_exception_table_vec(unsigned int vec, void *handler)
{
        extern void *exception_handling_table[];
        void *old_handler;

        old_handler = exception_handling_table[vec];
        exception_handling_table[vec] = handler;
        return old_handler;
}

void __init trap_init(void)
{
        set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
        set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

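        /*
         * set_exception_table_evt() takes the raw EXPEVT exception event
         * code rather than a vector number; 0x800 and 0x820 are the SH-4
         * general FPU disable and slot FPU disable exceptions.
         */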
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
        /*
         * For SH-4 lacking an FPU, treat floating point instructions as
         * reserved. They'll be handled in the math-emu case, or faulted on
         * otherwise.
         */
        set_exception_table_evt(0x800, do_reserved_inst);
        set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
        set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
        set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif

#ifdef CONFIG_CPU_SH2
        set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
        set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
        set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
        set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
        set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
 790