/*
 * arch/sh64/kernel/traps.c
 *
 * Handles hardware traps and faults after state has been saved in entry.S.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/processor.h>
#include <asm/pgtable.h>

#undef DEBUG_EXCEPTION
#ifdef DEBUG_EXCEPTION
extern void show_excp_regs(char *fname, int trapnr, int signr,
			   struct pt_regs *regs);
#else
#define show_excp_regs(a, b, c, d)
#endif

static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);

#define DO_ERROR(trapnr, signr, str, name, tsk) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
}

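/* Terminal error path : dump processor state and kill the current task.
   die_lock keeps concurrent CPUs from interleaving their register dumps. */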
static DEFINE_SPINLOCK(die_lock);

void die(const char * str, struct pt_regs * regs, long err)
{
	console_verbose();
	spin_lock_irq(&die_lock);
	printk("%s: %lx\n", str, (err & 0xffffff));
	show_regs(regs);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);
}

static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

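/* For a fault in kernel mode, first see whether an exception-table fixup
   exists for the faulting PC; if so, branch to the fixup instead of dying. */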
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)

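/* Implement misaligned load/store handling for kernel (and optionally for
   user mode too).  Limitation : only SHmedia mode code is handled - there
   is no handling at all yet for misaligned accesses occurring in SHcompact
   code. */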
static int misaligned_fixup(struct pt_regs *regs);

asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
	return;
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
	return;
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

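/* getcon/putcon - requires checking which control register is referenced. */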
#define OPCODE_CTRL_REG     3

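/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond
   to LSBs==4'b0000 etc). */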
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

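/* Workaround for silicon defect #2815 on SH5-101 cut2 : some inter-mode
   branches from SHcompact to SHmedia which should raise ITLBMISS or
   EXECPROT at the target falsely raise RESINST there instead.  Decode the
   opcode at the faulting PC against the table above : if it is actually a
   valid instruction, just return, so it is restarted (this time reached
   via RTE, which does not provoke the defect); otherwise fall through to
   the normal trap reporting. */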
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	unsigned long opcode = 0x6ff4fff0;	/* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf;	/* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to it will now
					   be from an RTE, not from SHcompact, so the silicon
					   defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only be reached in privileged mode. */
						return;
					}
					/* Otherwise, user mode trying to execute a privileged
					   instruction - fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs)) return;
					/* In user mode, only getcon/putcon on control registers
					   62 and above are valid. */
					if (combined == 0x9f) {	/* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control
						   register => trap */
					} else if (combined == 0x1bf) {	/* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control
						   register => trap */
					} else {
						/* Trap */
					}
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
		} else {
			/* Fault while reading the opcode.  This can't be due to the
			   defect (the opcode would have been read OK when the branch
			   target was fetched), so treat it as a genuine exec address
			   error. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, a reserved instruction is simply an
   unconditional trap. */
DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

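/* Catch-all for exceptions that have no dedicated handler : show the
   register state and die if the fault happened in kernel mode. */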
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__FUNCTION__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

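/* Backtracing relies on the kallsyms-based sh64 unwinder, so it is only
   available when CONFIG_KALLSYMS is enabled. */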
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ? tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}

EXPORT_SYMBOL(dump_stack);

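/* Common tail for unhandled exceptions : record the trap details in the
   task struct, post a signal if the fault came from user mode, then die
   unless an exception-table fixup applies. */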
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

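/* Fetch the instruction at 'pc'.  (pc & 3) == 1 identifies SHmedia mode;
   even PCs are SHcompact, which is not handled here. */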
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(unsigned long *) aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact - no handling provided yet. */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

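/* A valid effective address has its top bits equal to a sign extension of
   the low NEFF bits.  Only the NEFF==32 case is implemented. */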
static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

static int generate_and_check_address(struct pt_regs *regs,
				      __u32 opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	/* Return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54);	/* sign extend the 10-bit field */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store. */
	}
#endif

	*address = addr;
	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
/* Report only the first few fixups to the console, then go quiet; the
   enable flag below can switch misaligned fixup off altogether.  All are
   tunable via the sysctls registered at the bottom of this file. */
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
#endif

static int kernel_mode_unaligned_fixup_count = 32;

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	/* Copy a byte at a time so the access itself is never misaligned. */
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	/* Copy a byte at a time so the access itself is never misaligned. */
	p[0] = q[0];
	p[1] = q[1];
}

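/* Emulate a misaligned load : compute and validate the effective address,
   then transfer the data byte-safely (via __copy_user for user mode, or
   ldlo/ldhi pairs in kernel mode) into the destination register. */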
static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1;	/* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts : if we fault, it's a
		   genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			/* Combine a low-part/high-part unaligned load pair. */
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

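/* Emulate a misaligned store : as for misaligned_load, but transferring
   the source register out to memory. */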
static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1;	/* fault */
		}
	} else
#endif
	{
		/* kernel mode - we can take short cuts : if we fault, it's a
		   genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			/* Store the low and high parts with an unaligned pair. */
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)

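/* Emulate a misaligned FPU load.  Only supported for user mode : a
   misaligned FPU access inside the kernel is treated as a fatal bug. */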
static int misaligned_fpu_load(struct pt_regs *regs,
			       __u32 opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1;	/* fault */
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.fpu.hard.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				current->thread.fpu.hard.fp_regs[destreg] = bufhi;
				current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
#else
				current->thread.fpu.hard.fp_regs[destreg] = buflo;
				current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

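/* Emulate a misaligned FPU store - the mirror image of misaligned_fpu_load
   above. */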
static int misaligned_fpu_store(struct pt_regs *regs,
				__u32 opcode,
				int displacement_not_indexed,
				int width_shift,
				int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			grab_fpu();
			fpsave(&current->thread.fpu.hard);
			release_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.fpu.hard.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_LITTLE_ENDIAN)
				bufhi = current->thread.fpu.hard.fp_regs[srcreg];
				buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
#else
				buflo = current->thread.fpu.hard.fp_regs[srcreg];
				bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32*) &buffer = buflo;
		*(1 + (__u32*) &buffer) = bufhi;
		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1;	/* fault */
		}
		return 0;
	} else {
		die ("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}
#endif

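/* Top-level misaligned-access fixup : fetch the opcode at the faulting PC,
   decode the major/minor opcode fields, and dispatch to the appropriate
   emulation routine above.  On success, advance the PC past the emulated
   instruction. */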
static int misaligned_fixup(struct pt_regs *regs)
{
	unsigned long opcode;
	int error;
	int major, minor;

#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	/* Never fixup misaligned accesses without this option enabled. */
	return -1;
#else
	if (!user_mode_unaligned_fixup_enable) return -1;
#endif

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}
	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
		--user_mode_unaligned_fixup_count;
		/* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
	} else
#endif
	if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
				(__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
				current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}

	switch (major) {
	case (0x84>>2):	/* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2):	/* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2):	/* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2):	/* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2):	/* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2):	/* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2):	/* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2):	/* indexed loads */
		switch (minor) {
		case 0x1:	/* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5:	/* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2:	/* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3:	/* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2):	/* indexed stores */
		switch (minor) {
		case 0x1:	/* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2:	/* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3:	/* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	case (0x94>>2):	/* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2):	/* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2):	/* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;
	case (0x1c>>2):	/* floating indexed loads */
		switch (minor) {
		case 0x8:	/* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd:	/* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9:	/* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
	case (0xb4>>2):	/* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2):	/* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2):	/* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;
	case (0x3c>>2):	/* floating indexed stores */
		switch (minor) {
		case 0x8:	/* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd:	/* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9:	/* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
#endif

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4;	/* Skip the instruction that's just been emulated */
		return 0;
	}
}

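/* Expose the report counters (and the user-mode enable flag) as sysctls
   under /proc/sys/sh64/unaligned_fixup/. */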
static ctl_table unaligned_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec
	},
#endif
	{}
};

static ctl_table unaligned_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

static ctl_table sh64_root[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};

static struct ctl_table_header *sysctl_header;

static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}

__initcall(init_sysctl);

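/* Handler for the DEBUGINT interrupt from the debug module : report any
   unexpected cause bits, dump system state, then acknowledge the interrupt
   by clearing DM.EXP_CAUSE. */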
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the
	   amount of access we make to them - just go direct to their
	   physical addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}