1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/context_tracking.h>
16#include <linux/interrupt.h>
17#include <linux/kallsyms.h>
18#include <linux/spinlock.h>
19#include <linux/kprobes.h>
20#include <linux/uaccess.h>
21#include <linux/kdebug.h>
22#include <linux/kgdb.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ptrace.h>
26#include <linux/uprobes.h>
27#include <linux/string.h>
28#include <linux/delay.h>
29#include <linux/errno.h>
30#include <linux/kexec.h>
31#include <linux/sched.h>
32#include <linux/sched/task_stack.h>
33#include <linux/timer.h>
34#include <linux/init.h>
35#include <linux/bug.h>
36#include <linux/nmi.h>
37#include <linux/mm.h>
38#include <linux/smp.h>
39#include <linux/io.h>
40#include <linux/hardirq.h>
41#include <linux/atomic.h>
42
43#include <asm/stacktrace.h>
44#include <asm/processor.h>
45#include <asm/debugreg.h>
46#include <asm/text-patching.h>
47#include <asm/ftrace.h>
48#include <asm/traps.h>
49#include <asm/desc.h>
50#include <asm/fpu/internal.h>
51#include <asm/cpu.h>
52#include <asm/cpu_entry_area.h>
53#include <asm/mce.h>
54#include <asm/fixmap.h>
55#include <asm/mach_traps.h>
56#include <asm/alternative.h>
57#include <asm/fpu/xstate.h>
58#include <asm/vm86.h>
59#include <asm/umip.h>
60#include <asm/insn.h>
61#include <asm/insn-eval.h>
62
63#ifdef CONFIG_X86_64
64#include <asm/x86_init.h>
65#include <asm/proto.h>
66#else
67#include <asm/processor-flags.h>
68#include <asm/setup.h>
69#include <asm/proto.h>
70#endif
71
72DECLARE_BITMAP(system_vectors, NR_VECTORS);
73
74static inline void cond_local_irq_enable(struct pt_regs *regs)
75{
76 if (regs->flags & X86_EFLAGS_IF)
77 local_irq_enable();
78}
79
80static inline void cond_local_irq_disable(struct pt_regs *regs)
81{
82 if (regs->flags & X86_EFLAGS_IF)
83 local_irq_disable();
84}
85
/*
 * Check whether a reported BUG()/WARN() address points at a UD2
 * instruction.  Only kernel addresses qualify; anything below
 * TASK_SIZE_MAX is rejected outright.
 */
__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we'd have gotten
	 * a different exception.  So a plain dereference is fine here.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
97
/*
 * Handle the parts of a trap that do not involve sending a signal:
 * vm86 forwarding, kernel exception-table fixups and die().  Returns 0
 * when the trap was fully handled here, -1 when the caller should go
 * on to deliver a signal to current.
 */
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
					      error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
135
/*
 * Rate-limited log of an unhandled signal: task, IP/SP, error code and
 * the VMA the faulting IP falls into.  The pr_info() format string has
 * no trailing newline on purpose: print_vma_addr()/pr_cont() continue
 * the same line.
 */
static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}
149
150static void
151do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
152 long error_code, int sicode, void __user *addr)
153{
154 struct task_struct *tsk = current;
155
156 if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
157 return;
158
159 show_signal(tsk, signr, "trap ", str, regs, error_code);
160
161 if (!sicode)
162 force_sig(signr);
163 else
164 force_sig_fault(signr, sicode, addr);
165}
166NOKPROBE_SYMBOL(do_trap);
167
168static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
169 unsigned long trapnr, int signr, int sicode, void __user *addr)
170{
171 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
172
173 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
174 NOTIFY_STOP) {
175 cond_local_irq_enable(regs);
176 do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
177 cond_local_irq_disable(regs);
178 }
179}
180
181
182
183
184
185
186
187
188
189
190
/*
 * Posix requires to provide the address of the faulting instruction for
 * SIGILL (ILL_*) and SIGFPE (FPE_*) exceptions.
 *
 * This address is usually regs->ip, but when an uprobe moved the code
 * out of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary.
 * If a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}
195
/* #DE: divide error (divide by zero or INT_MIN / -1 overflow). */
DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide_error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}
201
/* #OF: overflow, raised by the INTO instruction with OF set. */
DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}
206
/*
 * #UD delivery.  Non-static only when the F00F workaround needs to call
 * it from the page fault path as well.
 */
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}
216
/*
 * Try to treat a kernel #UD as a WARN()/BUG() site (the kernel uses UD2
 * for those).  Returns true when report_bug() classified it as a WARN
 * and the faulting IP was advanced past the UD2 so execution continues.
 */
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}
244
/* #UD: invalid opcode. */
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a debug facility in the kernel, so if we're in #UD
	 * and we can handle it before exception entry, handle it now.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}
263
/* Vector 9: coprocessor segment overrun (legacy 387 era). */
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}
269
/* #TS: invalid TSS; error code carries the selector. */
DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}
275
/* #NP: segment not present. */
DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}
281
/* #SS: stack segment fault. */
DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}
287
/*
 * #AC: alignment check.  In kernel mode this can only come from a split
 * lock (the kernel never runs with EFLAGS.AC set), which is fatal.  In
 * user mode try the split-lock handler first, otherwise deliver SIGBUS.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		return;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

	local_irq_disable();
}
308
#ifdef CONFIG_VMAP_STACK
/*
 * Report a fatal hit on the vmap'd stack's guard page.  die() may
 * return in some configurations, hence the trailing panic().
 */
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we're still secure */
	panic("%s", message);
}
#endif
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
/*
 * #DF: double fault.  Runs on an IST stack on x86_64.  Normally fatal,
 * with two special recoveries: the ESPFIX64 bad-IRET promotion below,
 * and (with CONFIG_VMAP_STACK) a diagnosed kernel stack overflow.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a doublefault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame at TSS.sp0
	 * and then modify our own IRET frame so that, when we return,
	 * we land directly at the #GP(0) vector with the stack already
	 * set up according to its expectations.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi entry handling here because we don't use RCU
	 * before returning.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points at the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This
		 * fills in gpregs->ip through gpregs->ss.
		 */
		gpregs->ip = p[0];
		gpregs->cs = p[1];
		gpregs->flags = p[2];
		gpregs->sp = p[3];
		gpregs->ss = p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our own IRET frame so that, when we return from
		 * #DF, we land directly at the #GP entry point with RSP
		 * pointing just below the fake #GP frame we built above
		 * (i.e. at &gpregs->orig_ax, where that entry expects it).
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	idtentry_enter_nmi(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU cannot
	 * push the exception frame for #PF and delivers #DF instead.
	 * CR2 still holds the faulting address from the failed access.
	 *
	 * Heuristic: treat this #DF as a stack overflow when CR2 lies
	 * within one page below the bottom of the task stack (the guard
	 * page).  The unsigned subtraction wraps for addresses above or
	 * far below the stack, so only the guard region matches.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
				      regs, address);
	}
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();	/* unreachable; keeps begin/end balanced */
}
463
/* #BR: BOUND range exceeded.  Fatal in kernel mode, SIGSEGV for user. */
DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}
478
/*
 * Classification of the memory-operand hint derived for an unhandled
 * kernel #GP: no address could be decoded, the decoded address is
 * non-canonical, or it is canonical.
 */
enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};
484
485
486
487
488
489
/*
 * When an uncaught #GP occurs, try to determine the memory address
 * accessed by the instruction and return that address to the caller.
 * Also, try to figure out whether any part of the access to that
 * address was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;

	/* Fault-safe fetch of the faulting instruction bytes. */
	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
	insn_get_modrm(&insn);
	insn_get_sib(&insn);

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
521
522#define GPFSTR "general protection fault"
523
/*
 * #GP: general protection fault.  Tries, in order: UMIP emulation for
 * user mode, vm86 handling, user SIGSEGV, kernel fixups, kprobes, die
 * notifiers — and finally dies with a decoded address hint.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	/* UMIP-protected instructions from user mode may be emulated. */
	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}
599
/*
 * Offer an INT3 to the in-kernel consumers: kgdb's low-level trap hook,
 * the kprobes breakpoint handler and the die notifier chain.  Returns
 * true when one of them claimed the trap.
 */
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
618
619static void do_int3_user(struct pt_regs *regs)
620{
621 if (do_int3(regs))
622 return;
623
624 cond_local_irq_enable(regs);
625 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
626 cond_local_irq_disable(regs);
627}
628
/* #BP: breakpoint (INT3). */
DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before.  If the entry came from kernel mode, then use
	 * the NMI-style entry because the INT3 could be on the stack of
	 * an NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		bool irq_state = idtentry_enter_nmi(regs);
		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		idtentry_exit_nmi(regs, irq_state);
	}
}
661
662#ifdef CONFIG_X86_64
663
664
665
666
667
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode.  The actual stack switch is done in entry_64.S.
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
675
/* Return address pushed by error_entry, followed by the pt_regs frame. */
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target (5 words: ip,cs,flags,sp,ss) to temporary storage. */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	/* The rewritten frame must look like a return to user mode. */
	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
707#endif
708
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
731
static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3.  The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor.  To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register before
	 *   returning to the interrupted task.
	 *
	 * Keep it simple: read DR6, then clear it right away.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(0, 6);

	/* Filter out the reserved bits, which read as 1. */
	dr6 &= ~DR6_RESERVED;

	return dr6;
}
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
/*
 * Common #DB body for kernel and user entry paths.  @user tells whether
 * the trap came from user mode; @dr6 is the already-read-and-cleared
 * DR6 value.
 */
static void handle_debug(struct pt_regs *regs, unsigned long dr6, bool user)
{
	struct task_struct *tsk = current;
	bool user_icebp;
	int si_code;

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If DR6 is zero, no point in trying to handle it.  The kernel
	 * is not using INT1.
	 */
	if (!user && !dr6)
		return;

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a SIGTRAP for that.
	 */
	user_icebp = user && !dr6;

	/* Store the virtualized DR6 value */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs)) {
		return;
	}
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, 0,
		       SIGTRAP) == NOTIFY_STOP) {
		return;
	}

	/* It's safe to allow irq's after DR6 has been saved */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, 0,
				 X86_TRAP_DB);
		goto out;
	}

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * Historical junk that used to handle SYSENTER
		 * single-stepping.  This should be unreachable now.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}

	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(regs, 0, si_code);

out:
	cond_local_irq_disable(regs);
}
848
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive
	 * exceptions are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (XXX
	 * except a HW_BREAKPOINT_W on our stack).
	 *
	 * Entry text is excluded for HW_BP_X and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	bool irq_state = idtentry_enter_nmi(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP.  If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	handle_debug(regs, dr6, false);

	instrumentation_end();
	idtentry_exit_nmi(regs, irq_state);

	local_db_restore(dr7);
}
887
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because the exit-to-user
	 * path can invoke ptrace, schedule, access user memory, etc.
	 * This means that a recursive #DB is possible.  If this happens,
	 * that #DB will hit exc_debug_kernel() and clear DR7.  Since
	 * we're not on the IST stack right now, everything will be fine.
	 */
	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	handle_debug(regs, dr6, true);

	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
914
#ifdef CONFIG_X86_64
/* #DB from kernel mode: IST stack entry. */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* #DB from user mode: runs on the regular task stack. */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry stacks. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif
939
940
941
942
943
944
/*
 * Common handler for #MF (x87) and #XF (SIMD) math faults: fix up or
 * die for kernel faults, otherwise decode the FPU exception code and
 * deliver SIGFPE to the current task.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Spurious exception (no recognizable cause): just return. */
	if (!si_code)
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
986
/* #MF: x87 floating-point error. */
DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}
991
/* #XF: SIMD floating-point exception. */
DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* Erratum workaround: CPUs without XMM raise bogus #XF; treat as #GP. */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}
1003
DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler.  However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is
	 * not hurting and who knows which other CPUs suffer from this.
	 * Hence the intentionally empty body.
	 */
}
1026
/* #NM: device not available (FPU access with CR0.TS/EM set). */
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	/* No hardware FPU and emulation enabled: emulate the instruction. */
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen: the kernel runs with eager FPU. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}
1058
#ifdef CONFIG_X86_32
/* 32-bit only: fault raised by the IRET fixup in the entry code. */
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif
1071
void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	idt_setup_traps();

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	/* IST traps last: they rely on the entry areas and cpu_init(). */
	idt_setup_ist_traps();
}
1086