/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
/* implemented in ../lib/dbg.c */
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d) do { } while (0)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}
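
/*
 * Note that the 'tsk' macro argument is unused; the generated handler always
 * acts on 'current'.  For illustration, DO_ERROR(12, SIGILL,
 * "reserved instruction", reserved_inst, current) expands to roughly:
 *
 *	asmlinkage void do_reserved_inst(unsigned long error_code,
 *					 struct pt_regs *regs)
 *	{
 *		do_unhandled_exception(12, SIGILL, "reserved instruction",
 *				       "reserved_inst", error_code, regs,
 *				       current);
 *	}
 */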

static DEFINE_SPINLOCK(die_lock);

void die(const char *str, struct pt_regs *regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

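/*
 * For kernel-mode faults, consult the exception tables first: uaccess
 * helpers such as get_user() register fixup landing pads there, so a fault
 * inside one of them is recoverable.  Only die() when the faulting PC has
 * no registered fixup entry.
 */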
static void die_if_no_fixup(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)


/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
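/* Worked example (illustrative): GETCON has major/minor fields 9/0xf, so
   combined == 0x9f.  Entry 9 of the table below is 0xc0000000; shifting it
   right by (0xf << 1) == 30 and masking with 3 gives 3 == OPCODE_CTRL_REG,
   which routes GETCON to the control-register checks in do_reserved_inst(). */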
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	/* Workaround SH5-101 cut2 silicon defect #2815 :
	   in some situations, inter-mode branches from SHcompact -> SHmedia
	   which should take ITLBMISS or EXECPROT exceptions at the target
	   falsely take RESINST at the target instead. */

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to the instruction will now be from an RTE
					   not from SHcompact so the silicon defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only ever get here if a module has
						   SHcompact code inside it.  If so, the same fix up is needed. */
						return; /* same reason */
					}
					/* Otherwise, user mode trying to execute a privileged instruction -
					   fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs))
						return;
					/* In user mode ... */
					if (combined == 0x9f) { /* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;
						if (regno >= 62)
							return;
						/* Otherwise, reserved or privileged control register, => trap */
					} else if (combined == 0x1bf) { /* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;
						if (regno >= 62)
							return;
						/* Otherwise, reserved or privileged control register, => trap */
					}
					/* Anything else traps. */
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__func__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the definition in asm/bug.h */
EXPORT_SYMBOL(dump_stack);

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

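/*
 * The ISA mode is encoded in the low bits of the PC: SHmedia instructions
 * are 4-byte aligned and fetched with bit 0 set, so a SHmedia PC satisfies
 * (pc & 3) == 1, while SHcompact instructions are 2-byte aligned with bit 0
 * clear, so (pc & 1) == 0.  read_opcode() below keys off this encoding.
 */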
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* The fault was in the kernel, so read the opcode
			 * directly; if that access itself faults, it is a
			 * genuine bug and will be handled as one.
			 */
			*result_opcode = *(unsigned long *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this.  We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.  */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

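/*
 * Worked example (illustrative): with NEFF == 32, the effective addresses
 * 0x0000000000001000 and 0xffffffff80000000 pass the check below (each
 * equals the sign extension of its own low 32 bits), whereas
 * 0x0000000080000000 fails it and is rejected.
 */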
static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

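/*
 * Worked example (illustrative): the 10-bit displacement field is sign
 * extended below via (displacement << 54) >> 54 on a 64-bit value, so a
 * raw field of 0x3ff decodes as -1 while 0x1ff stays +511; the result is
 * then scaled by the access width (<< width_shift) before being added to
 * the base register.
 */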
static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}

	*address = addr;
	return 0;
}

static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
static int kernel_mode_unaligned_fixup_count = 32;

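/*
 * Assemble/disassemble the 16-bit value one byte at a time so that the
 * emulation itself never performs the very misaligned access it is
 * fixing up.
 */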
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

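/*
 * In the kernel-mode paths below, the SHmedia ldlo/ldhi (and stlo/sthi)
 * instruction pairs access the low and high portions of a misaligned word
 * within its two containing aligned words; OR-ing the two halves of a load
 * reassembles the full value without ever issuing a misaligned access.
 */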
static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, address);

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32 *) &buffer;
		bufhi = *(1 + (__u32 *) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, address);

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32 *) &buffer = buflo;
		*(1 + (__u32 *) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

	if (!user_mode_unaligned_fixup_enable)
		return -1;

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
		       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
	} else if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}

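	/*
	 * Worked example (illustrative): for LD.L the major opcode field is
	 * 0x88 >> 2 == 0x22, so a misaligned "LD.L Rm, disp, Rd" lands in
	 * the case below that calls misaligned_load(regs, opcode, 1, 2, 1):
	 * a displacement-addressed load of 1 << 2 == 4 bytes with sign
	 * extension.
	 */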
	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x94>>2): /* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;
	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;
	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}

static ctl_table unaligned_table[] = {
	{
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

static ctl_table unaligned_root[] = {
	{
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

static ctl_table sh64_root[] = {
	{
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};

static struct ctl_table_header *sysctl_header;

static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}

__initcall(init_sysctl);
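
/*
 * The tables above expose the fixup knobs as
 * /proc/sys/sh64/unaligned_fixup/{kernel_reports,user_reports,user_enable}.
 * For example, "echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable" stops
 * misaligned_fixup() from attempting any fix (note the enable flag is
 * checked before the user/kernel distinction is made).
 */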

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void __cpuinit per_cpu_trap_init(void)
{
	/* Nothing to do for now, VBR initialization later. */
}