/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
                           struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
        do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
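
/*
 * For example, DO_ERROR(13, SIGILL, "illegal slot instruction",
 * illegal_slot_inst, current) expands to an asmlinkage handler
 * do_illegal_slot_inst() that funnels trap 13 / SIGILL into
 * do_unhandled_exception() for the current task.
 */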

static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
        console_verbose();
        spin_lock_irq(&die_lock);
        printk("%s: %lx\n", str, (err & 0xffffff));
        show_regs(regs);
        spin_unlock_irq(&die_lock);
        do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs))
                die(str, regs, err);
}

static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode(regs)) {
                const struct exception_table_entry *fixup;
                fixup = search_exception_tables(regs->pc);
                if (fixup) {
                        regs->pc = fixup->fixup;
                        return;
                }
                die(str, regs, err);
        }
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(7, SIGSEGV, "address error(load)",
                                "do_address_error_load",
                                error_code, regs, current);
        }
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
        if (misaligned_fixup(regs) < 0) {
                do_unhandled_exception(8, SIGSEGV, "address error(store)",
                                "do_address_error_store",
                                error_code, regs, current);
        }
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3
/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
        0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
        0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
        0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
        0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
        0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
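
/*
 * Illustrative helper (a sketch, not used by the code below): decode an
 * SHmedia opcode's validity state from the table above.  For example, LD.L
 * has major opcode 0x88 >> 2 == 0x22; shmedia_opcode_table[0x22] is
 * 0x55555555, so every bit-pair is OPCODE_USER_VALID whatever the minor
 * field holds.
 */
static inline int shmedia_opcode_state(unsigned long opcode)
{
        unsigned long index = (opcode >> 26) & 0x3f;       /* major opcode */
        unsigned long shift = ((opcode >> 16) & 0xf) << 1; /* bit-pair offset */

        return (shmedia_opcode_table[index] >> shift) & 0x3;
}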

asmlinkage void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
        /* Workaround SH5-101 cut2 silicon defect #2815 :
           in some situations, inter-mode branches from SHcompact -> SHmedia
           which should take ITLBMISS or EXECPROT exceptions at the target
           falsely take RESINST at the target instead. */

        unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
        unsigned long pc, aligned_pc;
        int get_user_error;
        int trapnr = 12;
        int signr = SIGILL;
        char *exception_name = "reserved_instruction";

        pc = regs->pc;
        if ((pc & 3) == 1) {
                /* SHmedia : check for defect.  This requires executable vmas
                   to be readable too. */
                aligned_pc = pc & ~3;
                if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                        get_user_error = -EFAULT;
                } else {
                        get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                }
                if (get_user_error >= 0) {
                        unsigned long index, shift;
                        unsigned long major, minor, combined;
                        unsigned long reserved_field;
                        reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
                        major = (opcode >> 26) & 0x3f;
                        minor = (opcode >> 16) & 0xf;
                        combined = (major << 4) | minor;
                        index = major;
                        shift = minor << 1;
                        if (reserved_field == 0) {
                                int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
                                switch (opcode_state) {
                                case OPCODE_INVALID:
                                        /* Trap. */
                                        break;
                                case OPCODE_USER_VALID:
                                        /* Restart the instruction : the branch to the instruction will now be from an RTE
                                           not from SHcompact so the silicon defect won't be triggered. */
                                        return;
                                case OPCODE_PRIV_VALID:
                                        if (!user_mode(regs)) {
                                                /* Should only ever get here if a module has
                                                   SHcompact code inside it.  If so, the same fixup is needed. */
                                                return;
                                        }
                                        /* Otherwise, user mode trying to execute a privileged instruction -
                                           fall through to trap. */
                                        break;
                                case OPCODE_CTRL_REG:
                                        /* If in privileged mode, return as above. */
                                        if (!user_mode(regs))
                                                return;
                                        /* In user mode ... */
                                        if (combined == 0x9f) { /* GETCON */
                                                unsigned long regno = (opcode >> 20) & 0x3f;
                                                if (regno >= 62)
                                                        return;
                                                /* Otherwise, reserved or privileged control register, => trap */
                                        } else if (combined == 0x1bf) { /* PUTCON */
                                                unsigned long regno = (opcode >> 4) & 0x3f;
                                                if (regno >= 62)
                                                        return;
                                                /* Otherwise, reserved or privileged control register, => trap */
                                        }
                                        /* Otherwise, trap. */
                                        break;
                                default:
                                        /* Fall through to trap. */
                                        break;
                                }
                        }
                        /* fall through to normal resinst processing */
                } else {
                        /* Error trying to read opcode.  This typically means a
                           real fault, not a RESINST any more.  So change the
                           codes. */
                        trapnr = 87;
                        exception_name = "address error (exec)";
                        signr = SIGSEGV;
                }
        }

        do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
        show_excp_regs(__func__, -1, -1, regs);
        die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
        /* Syscall debug */
        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

        die_if_kernel("unknown trapa", regs, scId);

        return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
        extern void sh64_unwind(struct pt_regs *regs);
        struct pt_regs *regs;

        regs = tsk ? tsk->thread.kregs : NULL;

        sh64_unwind(regs);
#else
        printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
        show_stack(NULL, sp);
}

void dump_stack(void)
{
        show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
                unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
        show_excp_regs(fn_name, trapnr, signr, regs);
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (user_mode(regs))
                force_sig(signr, tsk);

        die_if_no_fixup(str, regs, error_code);
}

static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
        int get_user_error;
        unsigned long aligned_pc;
        unsigned long opcode;

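        /* Bit 0 of the PC encodes the ISA mode: odd addresses are SHmedia
           (the instruction itself lives at pc & ~3), even addresses are
           SHcompact. */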
        if ((pc & 3) == 1) {
                /* SHmedia */
                aligned_pc = pc & ~3;
                if (from_user_mode) {
                        if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
                                get_user_error = -EFAULT;
                        } else {
                                get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
                                *result_opcode = opcode;
                        }
                        return get_user_error;
                } else {
                        /* If the fault was in the kernel, we can either read
                           this directly, or if not, we fault. */
                        *result_opcode = *(unsigned long *)aligned_pc;
                        return 0;
                }
        } else if ((pc & 1) == 0) {
                /* SHcompact */
                /* TODO : provide handling for this.  We don't really support
                   user-mode SHcompact yet, and for a kernel fault, this would
                   have to come from a module built for SHcompact.  */
                return -EFAULT;
        } else {
                /* misaligned */
                return -EFAULT;
        }
}

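/* With a 32-bit effective address space (NEFF == 32), a 64-bit value can only
   be a legal effective address if bits 63:32 are a sign-extension of bit 31. */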
static int address_is_sign_extended(__u64 a)
{
        __u64 b;
#if (NEFF == 32)
        b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
        return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

static int generate_and_check_address(struct pt_regs *regs,
                                      __u32 opcode,
                                      int displacement_not_indexed,
                                      int width_shift,
                                      __u64 *address)
{
        /* return -1 for fault, 0 for OK */

        __u64 base_address, addr;
        int basereg;

        basereg = (opcode >> 20) & 0x3f;
        base_address = regs->regs[basereg];
        if (displacement_not_indexed) {
                __s64 displacement;
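                /* The displacement is a signed 10-bit field scaled by the
                   access width: sign-extend it from bit 9 (shift up to bits
                   63:54, then arithmetic-shift back down), then scale by
                   width_shift. */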
                displacement = (opcode >> 10) & 0x3ff;
                displacement = ((displacement << 54) >> 54); /* sign extend */
                addr = (__u64)((__s64)base_address + (displacement << width_shift));
        } else {
                __u64 offset;
                int offsetreg;
                offsetreg = (opcode >> 10) & 0x3f;
                offset = regs->regs[offsetreg];
                addr = base_address + offset;
        }

        /* Check sign extended */
        if (!address_is_sign_extended(addr)) {
                return -1;
        }

        /* Check accessible.  For misaligned access in the kernel, assume the
           address is always accessible (and if not, just fault when the
           load/store gets done.) */
        if (user_mode(regs)) {
                if (addr >= TASK_SIZE) {
                        return -1;
                }
                /* Do access_ok check later - it depends on whether it's a load or a store. */
        }

        *address = addr;
        return 0;
}

static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
static int kernel_mode_unaligned_fixup_count = 32;

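/* Transfer a 16-bit quantity one byte at a time so the access itself can
   never raise a further misalignment exception.  The (int) casts below
   deliberately narrow the 64-bit address down to the 32-bit effective
   address space (NEFF == 32). */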
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;
        q[0] = p[0];
        q[1] = p[1];

        if (do_sign_extend) {
                *result = (__u64)(__s64) *(short *) &x;
        } else {
                *result = (__u64) x;
        }
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
        unsigned short x;
        unsigned char *p, *q;
        p = (unsigned char *) (int) address;
        q = (unsigned char *) &x;

        x = (__u16) value;
        p[0] = q[0];
        p[1] = q[1];
}

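/* Emulate a misaligned integer load.  width_shift is log2 of the access size
   in bytes (1 = 16-bit, 2 = 32-bit, 3 = 64-bit); do_sign_extend selects
   LD.W/LD.L-style sign extension rather than LD.UW-style zero extension. */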
static int misaligned_load(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_sign_extend)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                switch (width_shift) {
                case 1:
                        if (do_sign_extend) {
                                regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
                        } else {
                                regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
                        }
                        break;
                case 2:
                        regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
                        break;
                case 3:
                        regs->regs[destreg] = buffer;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        } else {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 lo, hi;

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
                        break;
                case 2:
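                        /* ldlo.l/ldhi.l fetch the low and high fragments of
                           a misaligned 32-bit datum from the two aligned
                           words that straddle it; OR-ing the results
                           reassembles the value.  The .q forms below do the
                           same for 64 bits. */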
                        asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                case 3:
                        asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
                        asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
                        regs->regs[destreg] = lo | hi;
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}

static int misaligned_store(struct pt_regs *regs,
                            __u32 opcode,
                            int displacement_not_indexed,
                            int width_shift)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                switch (width_shift) {
                case 1:
                        *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
                        break;
                case 2:
                        *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
                        break;
                case 3:
                        buffer = regs->regs[srcreg];
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
        } else {
                /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
                __u64 val = regs->regs[srcreg];

                switch (width_shift) {
                case 1:
                        misaligned_kernel_word_store(address, val);
                        break;
                case 2:
                        asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
                        break;
                case 3:
                        asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
                        asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
        }

        return 0;
}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int destreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        destreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                __u32 buflo, bufhi;

                if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current, regs);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                buflo = *(__u32 *) &buffer;
                bufhi = *(1 + (__u32 *) &buffer);

                switch (width_shift) {
                case 2:
                        current->thread.fpu.hard.fp_regs[destreg] = buflo;
                        break;
                case 3:
                        if (do_paired_load) {
                                current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                current->thread.fpu.hard.fp_regs[destreg] = bufhi;
                                current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
                                current->thread.fpu.hard.fp_regs[destreg] = buflo;
                                current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }
                return 0;
        } else {
                die("Misaligned FPU load inside kernel", regs, 0);
                return -1;
        }
}

static int misaligned_fpu_store(struct pt_regs *regs,
                           __u32 opcode,
                           int displacement_not_indexed,
                           int width_shift,
                           int do_paired_load)
{
        /* Return -1 for a fault, 0 for OK */
        int error;
        int srcreg;
        __u64 address;

        error = generate_and_check_address(regs, opcode,
                        displacement_not_indexed, width_shift, &address);
        if (error < 0) {
                return error;
        }

        srcreg = (opcode >> 4) & 0x3f;
        if (user_mode(regs)) {
                __u64 buffer;
                /* Initialise these to NaNs. */
                __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

                if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
                        return -1;
                }

                /* 'current' may be the current owner of the FPU state, so
                   context switch the registers into memory so they can be
                   indexed by register number. */
                if (last_task_used_math == current) {
                        enable_fpu();
                        save_fpu(current, regs);
                        disable_fpu();
                        last_task_used_math = NULL;
                        regs->sr |= SR_FD;
                }

                switch (width_shift) {
                case 2:
                        buflo = current->thread.fpu.hard.fp_regs[srcreg];
                        break;
                case 3:
                        if (do_paired_load) {
                                buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
                        } else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg];
                                buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
                                buflo = current->thread.fpu.hard.fp_regs[srcreg];
                                bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
                        }
                        break;
                default:
                        printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
                                width_shift, (unsigned long) regs->pc);
                        break;
                }

                *(__u32 *) &buffer = buflo;
                *(1 + (__u32 *) &buffer) = bufhi;
                if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
                        return -1; /* fault */
                }
                return 0;
        } else {
                die("Misaligned FPU store inside kernel", regs, 0);
                return -1;
        }
}

static int misaligned_fixup(struct pt_regs *regs)
{
        unsigned long opcode;
        int error;
        int major, minor;

        /* user_enable only governs user-mode accesses; kernel-mode fixups
           are always attempted. */
        if (user_mode(regs) && !user_mode_unaligned_fixup_enable)
                return -1;

        error = read_opcode(regs->pc, &opcode, user_mode(regs));
        if (error < 0) {
                return error;
        }
        major = (opcode >> 26) & 0x3f;
        minor = (opcode >> 16) & 0xf;

        if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
                --user_mode_unaligned_fixup_count;
                /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
                printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
        } else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
                --kernel_mode_unaligned_fixup_count;
                if (in_interrupt()) {
                        printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
                               (__u32)regs->pc, opcode);
                } else {
                        printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
                               current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
                }
        }

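        /* The case labels are the opcode's top byte as given in the ISA
           tables, shifted right by 2 to line up with the 6-bit major field
           extracted above (e.g. LD.W: 0x84 >> 2 == 0x21). */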
        switch (major) {
        case (0x84>>2): /* LD.W */
                error = misaligned_load(regs, opcode, 1, 1, 1);
                break;
        case (0xb0>>2): /* LD.UW */
                error = misaligned_load(regs, opcode, 1, 1, 0);
                break;
        case (0x88>>2): /* LD.L */
                error = misaligned_load(regs, opcode, 1, 2, 1);
                break;
        case (0x8c>>2): /* LD.Q */
                error = misaligned_load(regs, opcode, 1, 3, 0);
                break;

        case (0xa4>>2): /* ST.W */
                error = misaligned_store(regs, opcode, 1, 1);
                break;
        case (0xa8>>2): /* ST.L */
                error = misaligned_store(regs, opcode, 1, 2);
                break;
        case (0xac>>2): /* ST.Q */
                error = misaligned_store(regs, opcode, 1, 3);
                break;

        case (0x40>>2): /* indexed loads */
                switch (minor) {
                case 0x1: /* LDX.W */
                        error = misaligned_load(regs, opcode, 0, 1, 1);
                        break;
                case 0x5: /* LDX.UW */
                        error = misaligned_load(regs, opcode, 0, 1, 0);
                        break;
                case 0x2: /* LDX.L */
                        error = misaligned_load(regs, opcode, 0, 2, 1);
                        break;
                case 0x3: /* LDX.Q */
                        error = misaligned_load(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;

        case (0x60>>2): /* indexed stores */
                switch (minor) {
                case 0x1: /* STX.W */
                        error = misaligned_store(regs, opcode, 0, 1);
                        break;
                case 0x2: /* STX.L */
                        error = misaligned_store(regs, opcode, 0, 2);
                        break;
                case 0x3: /* STX.Q */
                        error = misaligned_store(regs, opcode, 0, 3);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;

        case (0x94>>2): /* FLD.S */
                error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
                break;
        case (0x98>>2): /* FLD.P */
                error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
                break;
        case (0x9c>>2): /* FLD.D */
                error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
                break;
        case (0x1c>>2): /* floating indexed loads */
                switch (minor) {
                case 0x8: /* FLDX.S */
                        error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
                        break;
                case 0xd: /* FLDX.P */
                        error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
                        break;
                case 0x9: /* FLDX.D */
                        error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;
        case (0xb4>>2): /* FST.S */
                error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
                break;
        case (0xb8>>2): /* FST.P */
                error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
                break;
        case (0xbc>>2): /* FST.D */
                error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
                break;
        case (0x3c>>2): /* floating indexed stores */
                switch (minor) {
                case 0x8: /* FSTX.S */
                        error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
                        break;
                case 0xd: /* FSTX.P */
                        error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
                        break;
                case 0x9: /* FSTX.D */
                        error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
                        break;
                default:
                        error = -1;
                        break;
                }
                break;

        default:
                /* Fault */
                error = -1;
                break;
        }

        if (error < 0) {
                return error;
        } else {
                regs->pc += 4; /* Skip the instruction that's just been emulated */
                return 0;
        }
}

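/* Expose the fixup controls as
   /proc/sys/sh64/unaligned_fixup/{kernel_reports,user_reports,user_enable};
   e.g. writing 0 to user_enable turns off user-mode fixup altogether. */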
static ctl_table unaligned_table[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "kernel_reports",
                .data           = &kernel_mode_unaligned_fixup_count,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "user_reports",
                .data           = &user_mode_unaligned_fixup_count,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "user_enable",
                .data           = &user_mode_unaligned_fixup_enable,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec
        },
        {}
};

static ctl_table unaligned_root[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "unaligned_fixup",
                .mode           = 0555,
                .child          = unaligned_table
        },
        {}
};

static ctl_table sh64_root[] = {
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sh64",
                .mode           = 0555,
                .child          = unaligned_root
        },
        {}
};

static struct ctl_table_header *sysctl_header;

static int __init init_sysctl(void)
{
        sysctl_header = register_sysctl_table(sh64_root);
        return 0;
}
__initcall(init_sysctl);

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
        u64 peek_real_address_q(u64 addr);
        u64 poke_real_address_q(u64 addr, u64 val);
        unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
        unsigned long long exp_cause;
        /* It's not worth ioremapping the debug module registers for the amount
           of access we make to them - just go direct to their physical
           addresses. */
        exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
        if (exp_cause & ~4) {
                printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
                        (unsigned long)(exp_cause & 0xffffffff));
        }
        show_state();
        /* Clear all DEBUGINT causes */
        poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}