/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
        int get_user_error;
        unsigned long aligned_pc;
        insn_size_t opcode;

        if ((pc & 3) == 1) {
                /* SHmedia */
                aligned_pc = pc & ~3;
                if (from_user_mode) {
                        if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
                                get_user_error = -EFAULT;
                        } else {
                                get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
                                *result_opcode = opcode;
                        }
                        return get_user_error;
                } else {
                        /* The fault was in the kernel, so we can read the
                         * opcode directly; if the address is bogus, the
                         * access itself will fault. */
                        *result_opcode = *(insn_size_t *)aligned_pc;
                        return 0;
                }
        } else if ((pc & 1) == 0) {
                /* SHcompact */
                /* TODO: provide handling for this.  We don't really support
                   user-mode SHcompact yet, and for a kernel fault, this would
                   have to come from a module built for SHcompact.  */
                return -EFAULT;
        } else {
                /* misaligned */
                return -EFAULT;
        }
}

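/*
 * Worked example (illustrative): on sh64 the low bits of the PC encode the
 * ISA mode of the faulting code.  A PC of 0x400005 has (pc & 3) == 1, so
 * the fault was in SHmedia code and the 32-bit opcode is fetched from the
 * aligned address 0x400004.  A PC of 0x400006 has (pc & 1) == 0 (SHcompact,
 * unhandled here), and any other low-bit pattern is a genuinely misaligned
 * PC.
 */
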
static int address_is_sign_extended(__u64 a)
{
        __u64 b;
#if (NEFF == 32)
        b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
        return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

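/*
 * Worked example (illustrative): with NEFF == 32, a valid effective address
 * must equal the sign-extension of its own low 32 bits.  0x000000007fffffff
 * passes (bit 31 clear, upper half zero), 0xffffffff80000000 passes (bit 31
 * set, upper half all-ones), but 0x0000000080000000 fails, because
 * sign-extending its low word gives 0xffffffff80000000 instead.
 */
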
/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
                                      insn_size_t opcode,
                                      int displacement_not_indexed,
                                      int width_shift,
                                      __u64 *address)
{
        __u64 base_address, addr;
        int basereg;

        switch (1 << width_shift) {
        case 1: inc_unaligned_byte_access(); break;
        case 2: inc_unaligned_word_access(); break;
        case 4: inc_unaligned_dword_access(); break;
        case 8: inc_unaligned_multi_access(); break;
        }

        basereg = (opcode >> 20) & 0x3f;
        base_address = regs->regs[basereg];
        if (displacement_not_indexed) {
                __s64 displacement;
                displacement = (opcode >> 10) & 0x3ff;
                displacement = sign_extend64(displacement, 9);
                addr = (__u64)((__s64)base_address + (displacement << width_shift));
        } else {
                __u64 offset;
                int offsetreg;
                offsetreg = (opcode >> 10) & 0x3f;
                offset = regs->regs[offsetreg];
                addr = base_address + offset;
        }

        /* Check sign extended */
        if (!address_is_sign_extended(addr))
                return -1;

        /* Check that the address is accessible.  For a misaligned access in
           the kernel, assume the address is always accessible (and if it
           isn't, we just fault when the load/store gets done). */
        if (user_mode(regs)) {
                inc_unaligned_user_access();

                if (addr >= TASK_SIZE)
                        return -1;
        } else
                inc_unaligned_kernel_access();

        *address = addr;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
        unaligned_fixups_notify(current, opcode, regs);

        return 0;
}

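/*
 * Worked example (illustrative, derived from the decode above): the memory
 * opcodes keep their base register in bits 25:20, a 10-bit signed
 * displacement (or a 6-bit index register) in bits 19:10, and the data
 * register in bits 9:4.  The displacement is scaled by the access size, so
 * for a word access (width_shift == 1) with base r1 == 0x1001 and a
 * displacement field of 3, the effective address is
 * 0x1001 + (3 << 1) == 0x1007.
 */
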
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;
        q[0] = p[0];
        q[1] = p[1];

        if (do_sign_extend) {
                *result = (__u64)(__s64) *(short *) &x;
        } else {
                *result = (__u64) x;
        }
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;

        x = (__u16) value;
        p[0] = q[0];
        p[1] = q[1];
}

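/*
 * Note (illustrative): the two word helpers above deliberately move the
 * datum one byte at a time.  Byte accesses are always aligned, so the copy
 * itself can never raise a further address error no matter how 'address'
 * is aligned; the (int) cast relies on the NEFF == 32 sign-extended
 * address model checked earlier.
 */
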
static int misaligned_load(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_sign_extend)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                switch (width_shift) {
                case 1:
                        if (do_sign_extend) {
                                regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
                        } else {
                                regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
                        }
                        break;
                case 2:
                        regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
                        break;
                case 3:
                        regs->regs[destreg] = buffer;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        } else {
                /* Kernel mode - we can take shortcuts, since if we fault it's a genuine bug */
                __u64 lo, hi;

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
                        break;
                case 2:
                        /* Fetch the two aligned halves and merge them. */
                        asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                case 3:
                        asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;

                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}

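/*
 * Note on the LDLO/LDHI pairs above (illustrative, per the SH-5
 * unaligned-access instructions): LDLO reads the bytes of the unaligned
 * datum that fall within the lower aligned word/quadword, and LDHI
 * (addressed at the datum's last byte) reads the bytes that fall within
 * the upper one.  Neither access crosses an alignment boundary, so neither
 * can itself fault on alignment, and OR-ing the two partial results
 * reassembles the full value.
 */
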
static int misaligned_store(struct pt_regs *regs,
                            insn_size_t opcode,
                            int displacement_not_indexed,
                            int width_shift)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                switch (width_shift) {
                case 1:
                        *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
                        break;
                case 2:
                        *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
                        break;
                case 3:
                        buffer = regs->regs[srcreg];
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
        } else {
                /* Kernel mode - we can take shortcuts, since if we fault it's a genuine bug */
                __u64 val = regs->regs[srcreg];

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_store(address, val);
                        break;
                case 2:
                        asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
                        break;
                case 3:
                        asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
                        break;

                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}

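/*
 * The STLO/STHI pairs mirror the loads (illustrative): each instruction
 * writes only the bytes of the unaligned datum that fall within a single
 * aligned word/quadword, so the pair together performs the full store
 * without either access straddling an alignment boundary.
 */
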
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                __u32 buflo, bufhi;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                buflo = *(__u32*) &buffer;
                bufhi = *(1 + (__u32*) &buffer);

                switch (width_shift) {
                case 2:
                        current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                        break;
                case 3:
                        if (do_paired_load) {
                                current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
                                current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
                return 0;
        } else {
                die ("Misaligned FPU load inside kernel", regs, 0);
                return -1;
        }
}

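/*
 * Why the endian swap above (illustrative reading of the code): a paired
 * load (FLD.P) moves two independent singles, so memory order matches
 * register order and no swap is needed.  A double load (FLD.D) moves one
 * 64-bit value split across an FP register pair; on little-endian parts
 * the first 32-bit half in memory is the low half of the double, which
 * this code places in the higher-numbered register of the pair.
 */
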
static int misaligned_fpu_store(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                /* Initialise these to NaNs. */
                __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                switch (width_shift) {
                case 2:
                        buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                        break;
                case 3:
                        if (do_paired_load) {
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                *(__u32*) &buffer = buflo;
                *(1 + (__u32*) &buffer) = bufhi;
                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                return 0;
        } else {
                die ("Misaligned FPU store inside kernel", regs, 0);
                return -1;
        }
}

static int misaligned_fixup(struct pt_regs *regs)
{
        insn_size_t opcode;
        int error;
        int major, minor;
        unsigned int user_action;

        user_action = unaligned_user_action();
        if (!(user_action & UM_FIXUP))
                return -1;

        error = read_opcode(regs->pc, &opcode, user_mode(regs));
        if (error < 0) {
                return error;
        }
        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;

        switch (major) {
                case (0x84>>2): /* LD.W */
                        error = misaligned_load(regs, opcode, 1, 1, 1);
                        break;
                case (0xb0>>2): /* LD.UW */
                        error = misaligned_load(regs, opcode, 1, 1, 0);
                        break;
                case (0x88>>2): /* LD.L */
                        error = misaligned_load(regs, opcode, 1, 2, 1);
                        break;
                case (0x8c>>2): /* LD.Q */
                        error = misaligned_load(regs, opcode, 1, 3, 0);
                        break;

                case (0xa4>>2): /* ST.W */
                        error = misaligned_store(regs, opcode, 1, 1);
                        break;
                case (0xa8>>2): /* ST.L */
                        error = misaligned_store(regs, opcode, 1, 2);
                        break;
                case (0xac>>2): /* ST.Q */
                        error = misaligned_store(regs, opcode, 1, 3);
                        break;

                case (0x40>>2): /* indexed loads */
                        switch (minor) {
                                case 0x1: /* LDX.W */
                                        error = misaligned_load(regs, opcode, 0, 1, 1);
                                        break;
                                case 0x5: /* LDX.UW */
                                        error = misaligned_load(regs, opcode, 0, 1, 0);
                                        break;
                                case 0x2: /* LDX.L */
                                        error = misaligned_load(regs, opcode, 0, 2, 1);
                                        break;
                                case 0x3: /* LDX.Q */
                                        error = misaligned_load(regs, opcode, 0, 3, 0);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

                case (0x60>>2): /* indexed stores */
                        switch (minor) {
                                case 0x1: /* STX.W */
                                        error = misaligned_store(regs, opcode, 0, 1);
                                        break;
                                case 0x2: /* STX.L */
                                        error = misaligned_store(regs, opcode, 0, 2);
                                        break;
                                case 0x3: /* STX.Q */
                                        error = misaligned_store(regs, opcode, 0, 3);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

                case (0x94>>2): /* FLD.S */
                        error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
                        break;
                case (0x98>>2): /* FLD.P */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
                        break;
                case (0x9c>>2): /* FLD.D */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
                        break;
                case (0x1c>>2): /* floating indexed loads */
                        switch (minor) {
                        case 0x8: /* FLDX.S */
                                error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
                                break;
                        case 0xd: /* FLDX.P */
                                error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
                                break;
                        case 0x9: /* FLDX.D */
                                error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
                                break;
                        default:
                                error = -1;
                                break;
                        }
                        break;
                case (0xb4>>2): /* FST.S */
                        error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
                        break;
                case (0xb8>>2): /* FST.P */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
                        break;
                case (0xbc>>2): /* FST.D */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
                        break;
                case (0x3c>>2): /* floating indexed stores */
                        switch (minor) {
                        case 0x8: /* FSTX.S */
                                error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
                                break;
                        case 0xd: /* FSTX.P */
                                error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
                                break;
                        case 0x9: /* FSTX.D */
                                error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
                                break;
                        default:
                                error = -1;
                                break;
                        }
                        break;

                default:
                        /* Fault */
                        error = -1;
                        break;
        }

        if (error < 0) {
                return error;
        } else {
                regs->pc += 4; /* Skip the instruction that's just been emulated */
                return 0;
        }
}

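/*
 * Worked example for the case labels above (illustrative): the major
 * opcode lives in bits 31:26, so an LD.W whose top byte is 0x84 decodes as
 * major == (0x84 >> 2) == 0x21.  Writing the labels as 0xNN>>2 keeps them
 * textually matched to the byte values used in the SHmedia opcode listings
 * while comparing against the extracted 6-bit field.
 */
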
static void do_unhandled_exception(int signr, char *str, unsigned long error,
                                   struct pt_regs *regs)
{
        if (user_mode(regs))
                force_sig(signr, current);

        die_if_no_fixup(str, regs, error);
}

#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
        do_unhandled_exception(signr, str, error_code, regs); \
}

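/*
 * For example, DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
 * below expands to:
 *
 *   asmlinkage void do_illegal_slot_inst(unsigned long error_code,
 *                                        struct pt_regs *regs)
 *   {
 *           do_unhandled_exception(SIGILL, "illegal slot instruction",
 *                                  error_code, regs);
 *   }
 */
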
DO_ERROR(SIGILL,  "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes, i.e.
   opcode[31:26,19:16].  The major opcode (the 6 MSBs of this value) indexes
   into the following array.  The minor opcode (the 4 LSBs) selects the
   bit-pair in the entry (bits 1:0 correspond to LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
        0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
        0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
        0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
        0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

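/*
 * Worked lookup (illustrative): for an opcode with major == 0 and
 * minor == 1, the entry is shmedia_opcode_table[0] == 0x55554044 and the
 * state is (0x55554044 >> (1 << 1)) & 0x3 == 0x1, i.e. OPCODE_USER_VALID.
 */
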
/* Workaround SH5-101 cut2 silicon defect #2815:
   in some situations, inter-mode branches from SHcompact -> SHmedia
   which should take ITLBMISS or EXECPROT exceptions at the target
   falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
        insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
        unsigned long pc, aligned_pc;
        unsigned long index, shift;
        unsigned long major, minor, combined;
        unsigned long reserved_field;
        int opcode_state;
        int get_user_error;
        int signr = SIGILL;
        char *exception_name = "reserved_instruction";

        pc = regs->pc;

        /* SHcompact is not handled */
        if (unlikely((pc & 3) == 0))
                goto out;

        /* SHmedia: check for defect.  This requires executable vmas
           to be readable too. */
        aligned_pc = pc & ~3;
        if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
                get_user_error = -EFAULT;
        else
                get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);

        if (get_user_error < 0) {
                /*
                 * Error trying to read opcode.  This typically means a
                 * real fault, not a RESINST any more.  So change the
                 * codes.
                 */
                exception_name = "address error (exec)";
                signr = SIGSEGV;
                goto out;
        }

        /* These bits are currently reserved as zero in all valid opcodes */
        reserved_field = opcode & 0xf;
        if (unlikely(reserved_field))
                goto out;       /* invalid opcode */

        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;
        combined = (major << 4) | minor;
        index = major;
        shift = minor << 1;
        opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
        switch (opcode_state) {
        case OPCODE_INVALID:
                /* Trap. */
                break;
        case OPCODE_USER_VALID:
                /*
                 * Restart the instruction: the branch to the instruction
                 * will now be from an RTE not from SHcompact so the
                 * silicon defect won't be triggered.
                 */
                return;
        case OPCODE_PRIV_VALID:
                if (!user_mode(regs)) {
                        /*
                         * Should only ever get here if a module has
                         * SHcompact code inside it.  If so, the same fix
                         * up is needed.
                         */
                        return; /* same reason */
                }

                /*
                 * Otherwise, user mode trying to execute a privileged
                 * instruction - fall through to trap.
                 */
                break;
        case OPCODE_CTRL_REG:
                /* If in privileged mode, return as above. */
                if (!user_mode(regs))
                        return;

                /* In user mode ... */
                if (combined == 0x9f) { /* GETCON */
                        unsigned long regno = (opcode >> 20) & 0x3f;

                        if (regno >= 62)
                                return;

                        /* reserved/privileged control register => trap */
                } else if (combined == 0x1bf) { /* PUTCON */
                        unsigned long regno = (opcode >> 4) & 0x3f;

                        if (regno >= 62)
                                return;

                        /* reserved/privileged control register => trap */
                }

                break;
        default:
                /* Fall through to trap. */
                break;
        }

out:
        do_unhandled_exception(signr, exception_name, error_code, regs);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
        die_if_kernel("exception", regs, ex);
}

asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
        /* Syscall debug */
        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

        die_if_kernel("unknown trapa", regs, scId);

        return -ENOSYS;
}

/* Implement misaligned load/store handling for the kernel (and optionally
   for user mode too).  Limitation: only SHmedia mode code is handled - there
   is no handling at all yet for misaligned accesses occurring in SHcompact
   code. */

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0)
                do_unhandled_exception(SIGSEGV, "address error(load)",
                                       error_code, regs);
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0)
                do_unhandled_exception(SIGSEGV, "address error(store)",
                                error_code, regs);
}

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
        u64 peek_real_address_q(u64 addr);
        u64 poke_real_address_q(u64 addr, u64 val);
        unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
        unsigned long long exp_cause;
        /* It's not worth ioremapping the debug module registers for the amount
           of access we make to them - just go direct to their physical
           addresses. */
        exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
        if (exp_cause & ~4)
                printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
                        (unsigned long)(exp_cause & 0xffffffff));
        show_state();
        /* Clear all DEBUGINT causes */
        poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void per_cpu_trap_init(void)
{
        /* Nothing to do for now, VBR initialization later. */
}