// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
        int get_user_error;
        unsigned long aligned_pc;
        insn_size_t opcode;

        if ((pc & 3) == 1) {
                /* SHmedia */
                aligned_pc = pc & ~3;
                if (from_user_mode) {
                        if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
                                get_user_error = -EFAULT;
                        } else {
                                get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
                                *result_opcode = opcode;
                        }
                        return get_user_error;
                } else {
                        /* If the fault was in the kernel, we can read the
                         * opcode directly; if the address turns out to be
                         * bad, we simply fault again.
                         */
                        *result_opcode = *(insn_size_t *)aligned_pc;
                        return 0;
                }
        } else if ((pc & 1) == 0) {
                /* SHcompact */
                /* TODO : provide handling for this.  We don't really support
                   user-mode SHcompact yet, and for a kernel fault, this would
                   have to come from a module built for SHcompact.  */
                return -EFAULT;
        } else {
                /* misaligned */
                return -EFAULT;
        }
}
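
/*
 * Illustrative sketch (not compiled): how the low bits of the PC select
 * the ISA on SH-5, matching the checks in read_opcode() above.  The enum
 * and helper are hypothetical, local to this example.
 */
#if 0
enum sh5_isa { ISA_SHMEDIA, ISA_SHCOMPACT, ISA_BAD_PC };

static enum sh5_isa classify_pc(unsigned long pc)
{
        if ((pc & 3) == 1)      /* 4-byte aligned target with bit 0 set */
                return ISA_SHMEDIA;
        if ((pc & 1) == 0)      /* 2-byte aligned, bit 0 clear */
                return ISA_SHCOMPACT;
        return ISA_BAD_PC;      /* (pc & 3) == 3: genuinely misaligned */
}
#endif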

static int address_is_sign_extended(__u64 a)
{
        __u64 b;
#if (NEFF == 32)
        b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
        return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
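
/*
 * Worked example (illustrative, not compiled): with NEFF == 32 an
 * effective address is valid iff bits [63:32] replicate bit 31.
 */
#if 0
static void sign_extension_examples(void)
{
        BUG_ON(!address_is_sign_extended(0x000000007fffffffULL)); /* valid */
        BUG_ON(!address_is_sign_extended(0xffffffff80000000ULL)); /* valid */
        BUG_ON(address_is_sign_extended(0x0000000180000000ULL));  /* rejected */
}
#endif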

/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
                                      insn_size_t opcode,
                                      int displacement_not_indexed,
                                      int width_shift,
                                      __u64 *address)
{
        __u64 base_address, addr;
        int basereg;

        switch (1 << width_shift) {
        case 1: inc_unaligned_byte_access(); break;
        case 2: inc_unaligned_word_access(); break;
        case 4: inc_unaligned_dword_access(); break;
        case 8: inc_unaligned_multi_access(); break;
        }

        basereg = (opcode >> 20) & 0x3f;
        base_address = regs->regs[basereg];
        if (displacement_not_indexed) {
                __s64 displacement;
                displacement = (opcode >> 10) & 0x3ff;
                displacement = sign_extend64(displacement, 9);
                addr = (__u64)((__s64)base_address + (displacement << width_shift));
        } else {
                __u64 offset;
                int offsetreg;
                offsetreg = (opcode >> 10) & 0x3f;
                offset = regs->regs[offsetreg];
                addr = base_address + offset;
        }

        /* Check sign extended */
        if (!address_is_sign_extended(addr))
                return -1;

        /* Check accessible.  For misaligned access in the kernel, assume the
           address is always accessible (and if not, just fault when the
           load/store gets done.) */
        if (user_mode(regs)) {
                inc_unaligned_user_access();

                if (addr >= TASK_SIZE)
                        return -1;
        } else
                inc_unaligned_kernel_access();

        *address = addr;

        perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
        unaligned_fixups_notify(current, opcode, regs);

        return 0;
}
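
/*
 * Field layout assumed by generate_and_check_address() above for an
 * SHmedia load/store (summarised from the shifts/masks in the code):
 *
 *   opcode[31:26]  major opcode
 *   opcode[25:20]  base register
 *   opcode[19:10]  10-bit signed displacement (displacement forms),
 *                  or opcode[15:10] index register (indexed forms)
 *   opcode[9:4]    destination/source register
 *
 * Worked example (illustrative, hypothetical values): a base register
 * holding 0x1000, displacement field 0x3ff (i.e. -1) and width_shift 2
 * give an effective address of 0x1000 + (-1 << 2) = 0xffc.
 */
#if 0
static __u64 example_effective_address(void)
{
        __s64 disp = sign_extend64(0x3ff, 9);   /* -1 */

        return (__u64)(0x1000LL + (disp << 2)); /* 0xffc */
}
#endif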

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;
        q[0] = p[0];
        q[1] = p[1];

        if (do_sign_extend) {
                *result = (__u64)(__s64) *(short *) &x;
        } else {
                *result = (__u64) x;
        }
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;

        x = (__u16) value;
        p[0] = q[0];
        p[1] = q[1];
}
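
/*
 * The two helpers above go byte-by-byte because byte accesses can never
 * take an alignment trap, so the fixup itself cannot recursively fault.
 * An equivalent sketch (illustrative, not compiled) using the generic
 * unaligned-access helpers from <asm/unaligned.h>:
 */
#if 0
static void misaligned_kernel_word_load_alt(__u64 address, int do_sign_extend,
                                            __u64 *result)
{
        __u16 x = get_unaligned((__u16 *)(unsigned long)address);

        *result = do_sign_extend ? (__u64)(__s64)(__s16)x : (__u64)x;
}
#endif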

static int misaligned_load(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_sign_extend)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                switch (width_shift) {
                case 1:
                        if (do_sign_extend) {
                                regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
                        } else {
                                regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
                        }
                        break;
                case 2:
                        regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
                        break;
                case 3:
                        regs->regs[destreg] = buffer;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        } else {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 lo, hi;

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
                        break;
                case 2:
                        asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                case 3:
                        asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;

                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}
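
/*
 * The ldlo/ldhi pairs above each touch only the single aligned word (or
 * quadword) containing their effective address, so neither access can
 * trap on alignment; OR-ing the two partial results reassembles the
 * misaligned value.  A conceptual C equivalent for the 32-bit case
 * (illustrative, not compiled; assembles little-endian, as on
 * CONFIG_CPU_LITTLE_ENDIAN parts):
 */
#if 0
static __u32 unaligned_load32(const __u8 *p)
{
        __u32 v = 0;
        int i;

        for (i = 0; i < 4; i++)         /* byte accesses never trap */
                v |= (__u32)p[i] << (8 * i);
        return v;
}
#endif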

static int misaligned_store(struct pt_regs *regs,
                            insn_size_t opcode,
                            int displacement_not_indexed,
                            int width_shift)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                switch (width_shift) {
                case 1:
                        *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
                        break;
                case 2:
                        *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
                        break;
                case 3:
                        buffer = regs->regs[srcreg];
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
        } else {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 val = regs->regs[srcreg];

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_store(address, val);
                        break;
                case 2:
                        asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
                        break;
                case 3:
                        asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
                        break;

                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}
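
/*
 * stlo/sthi mirror ldlo/ldhi for stores: each writes only the bytes of
 * the value falling within one aligned word, so the pair covers both
 * words a misaligned store straddles.  Conceptual C equivalent for the
 * 32-bit case (illustrative, not compiled, little-endian byte order):
 */
#if 0
static void unaligned_store32(__u8 *p, __u32 v)
{
        int i;

        for (i = 0; i < 4; i++)         /* byte accesses never trap */
                p[i] = v >> (8 * i);
}
#endif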

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                __u32 buflo, bufhi;

                if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                buflo = *(__u32*) &buffer;
                bufhi = *(1 + (__u32*) &buffer);

                switch (width_shift) {
                case 2:
                        current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                        break;
                case 3:
                        if (do_paired_load) {
                                current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
                                current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
                                current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
                return 0;
        } else {
                die ("Misaligned FPU load inside kernel", regs, 0);
                return -1;
        }
}
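
/*
 * Note on the endian-dependent swap above (explanatory comment, added):
 * FLD.P loads two independent single-precision values, which always land
 * in fp_regs[destreg] and fp_regs[destreg+1] in memory order.  FLD.D
 * loads one double into an FP register pair; on little-endian parts the
 * code above places the high 32 bits of the memory image in the
 * lower-numbered register, evidently the layout the FPU uses for a
 * double spread across a register pair.
 */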

static int misaligned_fpu_store(struct pt_regs *regs,
                           insn_size_t opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0)
                return error;

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                /* Initialise these to NaNs. */
                __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

                if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                switch (width_shift) {
                case 2:
                        buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                        break;
                case 3:
                        if (do_paired_load) {
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
                                buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
                                bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                *(__u32*) &buffer = buflo;
                *(1 + (__u32*) &buffer) = bufhi;
                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                return 0;
        } else {
                die ("Misaligned FPU store inside kernel", regs, 0);
                return -1;
        }
}

static int misaligned_fixup(struct pt_regs *regs)
{
        insn_size_t opcode;
        int error;
        int major, minor;
        unsigned int user_action;

        user_action = unaligned_user_action();
        if (!(user_action & UM_FIXUP))
                return -1;

        error = read_opcode(regs->pc, &opcode, user_mode(regs));
        if (error < 0) {
                return error;
        }
        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;

        switch (major) {
                case (0x84>>2): /* LD.W */
                        error = misaligned_load(regs, opcode, 1, 1, 1);
                        break;
                case (0xb0>>2): /* LD.UW */
                        error = misaligned_load(regs, opcode, 1, 1, 0);
                        break;
                case (0x88>>2): /* LD.L */
                        error = misaligned_load(regs, opcode, 1, 2, 1);
                        break;
                case (0x8c>>2): /* LD.Q */
                        error = misaligned_load(regs, opcode, 1, 3, 0);
                        break;

                case (0xa4>>2): /* ST.W */
                        error = misaligned_store(regs, opcode, 1, 1);
                        break;
                case (0xa8>>2): /* ST.L */
                        error = misaligned_store(regs, opcode, 1, 2);
                        break;
                case (0xac>>2): /* ST.Q */
                        error = misaligned_store(regs, opcode, 1, 3);
                        break;

                case (0x40>>2): /* indexed loads */
                        switch (minor) {
                                case 0x1: /* LDX.W */
                                        error = misaligned_load(regs, opcode, 0, 1, 1);
                                        break;
                                case 0x5: /* LDX.UW */
                                        error = misaligned_load(regs, opcode, 0, 1, 0);
                                        break;
                                case 0x2: /* LDX.L */
                                        error = misaligned_load(regs, opcode, 0, 2, 1);
                                        break;
                                case 0x3: /* LDX.Q */
                                        error = misaligned_load(regs, opcode, 0, 3, 0);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

                case (0x60>>2): /* indexed stores */
                        switch (minor) {
                                case 0x1: /* STX.W */
                                        error = misaligned_store(regs, opcode, 0, 1);
                                        break;
                                case 0x2: /* STX.L */
                                        error = misaligned_store(regs, opcode, 0, 2);
                                        break;
                                case 0x3: /* STX.Q */
                                        error = misaligned_store(regs, opcode, 0, 3);
                                        break;
                                default:
                                        error = -1;
                                        break;
                        }
                        break;

                case (0x94>>2): /* FLD.S */
                        error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
                        break;
                case (0x98>>2): /* FLD.P */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
                        break;
                case (0x9c>>2): /* FLD.D */
                        error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
                        break;
                case (0x1c>>2): /* floating indexed loads */
                        switch (minor) {
                        case 0x8: /* FLDX.S */
                                error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
                                break;
                        case 0xd: /* FLDX.P */
                                error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
                                break;
                        case 0x9: /* FLDX.D */
                                error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
                                break;
                        default:
                                error = -1;
                                break;
                        }
                        break;
                case (0xb4>>2): /* FST.S */
                        error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
                        break;
                case (0xb8>>2): /* FST.P */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
                        break;
                case (0xbc>>2): /* FST.D */
                        error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
                        break;
                case (0x3c>>2): /* floating indexed stores */
                        switch (minor) {
                        case 0x8: /* FSTX.S */
                                error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
                                break;
                        case 0xd: /* FSTX.P */
                                error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
                                break;
                        case 0x9: /* FSTX.D */
                                error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
                                break;
                        default:
                                error = -1;
                                break;
                        }
                        break;

                default:
                        /* Fault */
                        error = -1;
                        break;
        }

        if (error < 0) {
                return error;
        } else {
                regs->pc += 4; /* Skip the instruction that's just been emulated */
                return 0;
        }
}
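
/*
 * Decode sketch (illustrative, not compiled): the case labels above are
 * written as (0x84>>2) etc. because the major opcode is opcode[31:26],
 * i.e. the instruction's top byte shifted right by two.  So an LD.W
 * whose top byte is 0x84 has major field 0x84 >> 2 == 0x21.
 */
#if 0
static int example_major(insn_size_t opcode)
{
        return (opcode >> 26) & 0x3f;   /* 0x84xxxxxx -> 0x21 == 0x84>>2 */
}
#endif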

static void do_unhandled_exception(int signr, char *str, unsigned long error,
                                   struct pt_regs *regs)
{
        if (user_mode(regs))
                force_sig(signr);

        die_if_no_fixup(str, regs, error);
}

#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
        do_unhandled_exception(signr, str, error_code, regs); \
}

DO_ERROR(SIGILL,  "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
        0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
        0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
        0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
        0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
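
/*
 * Lookup sketch (illustrative, not compiled): entry
 * shmedia_opcode_table[major] packs sixteen 2-bit states, with the state
 * for minor opcode n at bits [2n+1:2n].  For instance GETCON has
 * combined value 0x9f (major 0x9, minor 0xf) and PUTCON 0x1bf (major
 * 0x1b, minor 0xf), the values tested in do_reserved_inst() below; both
 * resolve to OPCODE_CTRL_REG.
 */
#if 0
static int opcode_state_of(unsigned long major, unsigned long minor)
{
        return (shmedia_opcode_table[major] >> (minor << 1)) & 0x3;
}
#endif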

/* Workaround SH5-101 cut2 silicon defect #2815 :
   in some situations, inter-mode branches from SHcompact -> SHmedia
   which should take ITLBMISS or EXECPROT exceptions at the target
   falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
        insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
        unsigned long pc, aligned_pc;
        unsigned long index, shift;
        unsigned long major, minor, combined;
        unsigned long reserved_field;
        int opcode_state;
        int get_user_error;
        int signr = SIGILL;
        char *exception_name = "reserved_instruction";

        pc = regs->pc;

        /* SHcompact is not handled */
        if (unlikely((pc & 3) == 0))
                goto out;

        /* SHmedia : check for defect.  This requires executable vmas
           to be readable too. */
        aligned_pc = pc & ~3;
        if (!access_ok(aligned_pc, sizeof(insn_size_t)))
                get_user_error = -EFAULT;
        else
                get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);

        if (get_user_error < 0) {
                /*
                 * Error trying to read opcode.  This typically means a
                 * real fault, not a RESINST any more.  So change the
                 * codes.
                 */
                exception_name = "address error (exec)";
                signr = SIGSEGV;
                goto out;
        }

        /* These bits are currently reserved as zero in all valid opcodes */
        reserved_field = opcode & 0xf;
        if (unlikely(reserved_field))
                goto out;       /* invalid opcode */

        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;
        combined = (major << 4) | minor;
        index = major;
        shift = minor << 1;
        opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
        switch (opcode_state) {
        case OPCODE_INVALID:
                /* Trap. */
                break;
        case OPCODE_USER_VALID:
                /*
                 * Restart the instruction: the branch to the instruction
                 * will now be from an RTE not from SHcompact so the
                 * silicon defect won't be triggered.
                 */
                return;
        case OPCODE_PRIV_VALID:
                if (!user_mode(regs)) {
                        /*
                         * Should only ever get here if a module has
                         * SHcompact code inside it. If so, the same fix
                         * up is needed.
                         */
                        return; /* same reason */
                }

                /*
                 * Otherwise, user mode trying to execute a privileged
                 * instruction - fall through to trap.
                 */
                break;
        case OPCODE_CTRL_REG:
                /* If in privileged mode, return as above. */
                if (!user_mode(regs))
                        return;

                /* In user mode ... */
                if (combined == 0x9f) { /* GETCON */
                        unsigned long regno = (opcode >> 20) & 0x3f;

                        if (regno >= 62)
                                return;

                        /* reserved/privileged control register => trap */
                } else if (combined == 0x1bf) { /* PUTCON */
                        unsigned long regno = (opcode >> 4) & 0x3f;

                        if (regno >= 62)
                                return;

                        /* reserved/privileged control register => trap */
                }

                break;
        default:
                /* Fall through to trap. */
                break;
        }

out:
        do_unhandled_exception(signr, exception_name, error_code, regs);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
        die_if_kernel("exception", regs, ex);
}

asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
        /* Syscall debug */
        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

        die_if_kernel("unknown trapa", regs, scId);

        return -ENOSYS;
}

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0)
                do_unhandled_exception(SIGSEGV, "address error(load)",
                                       error_code, regs);
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0)
                do_unhandled_exception(SIGSEGV, "address error(store)",
                                error_code, regs);
}

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
        u64 peek_real_address_q(u64 addr);
        u64 poke_real_address_q(u64 addr, u64 val);
        unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
        unsigned long long exp_cause;
        /* It's not worth ioremapping the debug module registers for the amount
           of access we make to them - just go direct to their physical
           addresses. */
        exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
        if (exp_cause & ~4)
                printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
                        (unsigned long)(exp_cause & 0xffffffff));
        show_state();
        /* Clear all DEBUGINT causes */
        poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void per_cpu_trap_init(void)
{
        /* Nothing to do for now, VBR initialization later. */
}