1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/context_tracking.h>
16#include <linux/interrupt.h>
17#include <linux/kallsyms.h>
18#include <linux/spinlock.h>
19#include <linux/kprobes.h>
20#include <linux/uaccess.h>
21#include <linux/kdebug.h>
22#include <linux/kgdb.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ptrace.h>
26#include <linux/uprobes.h>
27#include <linux/string.h>
28#include <linux/delay.h>
29#include <linux/errno.h>
30#include <linux/kexec.h>
31#include <linux/sched.h>
32#include <linux/sched/task_stack.h>
33#include <linux/timer.h>
34#include <linux/init.h>
35#include <linux/bug.h>
36#include <linux/nmi.h>
37#include <linux/mm.h>
38#include <linux/smp.h>
39#include <linux/io.h>
40#include <linux/hardirq.h>
41#include <linux/atomic.h>
42
43#include <asm/stacktrace.h>
44#include <asm/processor.h>
45#include <asm/debugreg.h>
46#include <asm/realmode.h>
47#include <asm/text-patching.h>
48#include <asm/ftrace.h>
49#include <asm/traps.h>
50#include <asm/desc.h>
51#include <asm/fpu/internal.h>
52#include <asm/cpu.h>
53#include <asm/cpu_entry_area.h>
54#include <asm/mce.h>
55#include <asm/fixmap.h>
56#include <asm/mach_traps.h>
57#include <asm/alternative.h>
58#include <asm/fpu/xstate.h>
59#include <asm/vm86.h>
60#include <asm/umip.h>
61#include <asm/insn.h>
62#include <asm/insn-eval.h>
63
64#ifdef CONFIG_X86_64
65#include <asm/x86_init.h>
66#include <asm/proto.h>
67#else
68#include <asm/processor-flags.h>
69#include <asm/setup.h>
70#include <asm/proto.h>
71#endif
72
73DECLARE_BITMAP(system_vectors, NR_VECTORS);
74
75static inline void cond_local_irq_enable(struct pt_regs *regs)
76{
77 if (regs->flags & X86_EFLAGS_IF)
78 local_irq_enable();
79}
80
81static inline void cond_local_irq_disable(struct pt_regs *regs)
82{
83 if (regs->flags & X86_EFLAGS_IF)
84 local_irq_disable();
85}
86
87__always_inline int is_valid_bugaddr(unsigned long addr)
88{
89 if (addr < TASK_SIZE_MAX)
90 return 0;
91
92
93
94
95
96 return *(unsigned short *)addr == INSN_UD2;
97}
98
/*
 * Common trap handling that does not itself raise a signal: forward to
 * vm86 where applicable, apply kernel exception-table fixups, or die()
 * on an unhandled kernel fault.
 *
 * Returns 0 when the trap was fully handled, -1 when the caller should
 * go on to deliver a signal to @tsk.
 */
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps below #UD are forwarded to the vm86 layer, which
		 * may consume them without a signal.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		/* Kernel-mode fault: try an exception-table fixup first. */
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	}

	/*
	 * Record the fault details in the thread struct so the signal
	 * path (and ptrace/debuggers) can see trap number and error code.
	 * Deliberately not done for kernel faults that were fixed up above.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
136
137static void show_signal(struct task_struct *tsk, int signr,
138 const char *type, const char *desc,
139 struct pt_regs *regs, long error_code)
140{
141 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
142 printk_ratelimit()) {
143 pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
144 tsk->comm, task_pid_nr(tsk), type, desc,
145 regs->ip, regs->sp, error_code);
146 print_vma_addr(KERN_CONT " in ", regs->ip);
147 pr_cont("\n");
148 }
149}
150
151static void
152do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
153 long error_code, int sicode, void __user *addr)
154{
155 struct task_struct *tsk = current;
156
157 if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
158 return;
159
160 show_signal(tsk, signr, "trap ", str, regs, error_code);
161
162 if (!sicode)
163 force_sig(signr);
164 else
165 force_sig_fault(signr, sicode, addr);
166}
167NOKPROBE_SYMBOL(do_trap);
168
169static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
170 unsigned long trapnr, int signr, int sicode, void __user *addr)
171{
172 RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
173
174 if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
175 NOTIFY_STOP) {
176 cond_local_irq_enable(regs);
177 do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
178 cond_local_irq_disable(regs);
179 }
180}
181
182
183
184
185
186
187
188
189
190
191
192static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
193{
194 return (void __user *)uprobe_get_trap_addr(regs);
195}
196
/* #DE - Divide Error: delivered to the task as SIGFPE/FPE_INTDIV. */
DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}
202
/* #OF - Overflow (INTO instruction): delivered as SIGSEGV. */
DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}
207
/*
 * #UD - Invalid Opcode, delivered as SIGILL/ILL_ILLOPN.
 *
 * Non-static when CONFIG_X86_F00F_BUG is set, presumably so the F00F
 * workaround elsewhere can call it directly — confirm against the
 * declaration site.
 */
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}
217
/*
 * Handle a #UD that hits a BUG()/WARN() UD2 site.  Returns true when
 * report_bug() classified it as a WARN, in which case the faulting ip
 * is advanced past the UD2 so execution continues.
 *
 * noinstr: runs before the regular entry bookkeeping; instrumentation
 * is only allowed inside the explicit begin/end section below.
 */
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All instrumentable work (report_bug and its printing) must sit
	 * between instrumentation_begin()/end().
	 */
	instrumentation_begin();

	/*
	 * Re-enable interrupts for the report if the interrupted context
	 * had them enabled, and restore the state afterwards.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		/* WARN: skip the UD2 and resume. */
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}
245
/*
 * #UD entry point (raw, i.e. without the automatic entry/exit work).
 */
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * Handle kernel BUG/WARN sites before irqentry_enter(): this
	 * keeps the handling usable from contexts where the normal entry
	 * accounting has not happened yet (only for !user_mode faults).
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}
264
/* Coprocessor Segment Overrun (legacy vector): delivered as SIGFPE. */
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}
270
/* #TS - Invalid TSS (error_code supplied by the CPU): SIGSEGV. */
DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}
276
/* #NP - Segment Not Present: delivered as SIGBUS. */
DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}
282
/* #SS - Stack Segment fault: delivered as SIGBUS. */
DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}
288
/*
 * #AC - Alignment Check.  In kernel mode #AC is only expected from the
 * split-lock detection machinery, hence the die() message below; in
 * user mode it is either a split-lock event (handled and possibly
 * rate-limited) or a real alignment fault -> SIGBUS/BUS_ADRALN.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	/*
	 * NOTE(review): this return path leaves interrupts enabled while
	 * the SIGBUS path below disables them again — verify the idtentry
	 * exit code tolerates both states.
	 */
	if (handle_user_split_lock(regs, error_code))
		return;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

	local_irq_disable();
}
309
#ifdef CONFIG_VMAP_STACK
/*
 * Report a hit on the vmap'ed stack guard page.  Never returns: die()
 * normally terminates the task, and if it somehow comes back we panic
 * rather than resume on a blown stack.
 */
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely paranoid: die() must not return here. */
	panic("%s", message);
}
#endif
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
/*
 * #DF - Double Fault.  Runs on its own IST stack.
 *
 * Two special cases are recovered rather than treated as fatal:
 *  - ESPFIX64: a #GP during the IRET in the espfix trampoline shows up
 *    here as #DF; it is rewritten into a normal #GP (below).
 *  - CONFIG_VMAP_STACK: a kernel stack overflow hits the guard page and
 *    escalates to #DF; it is reported as a stack overflow.
 * Everything else is a hard panic.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	/* CR2 holds the faulting address of the escalated page fault. */
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET in the espfix trampoline faulted (#GP escalated to #DF
	 * because the espfix stack is tiny), regs->sp points into the
	 * espfix region and regs->ip is exactly the IRET instruction.
	 * The hardware pushed the userspace iret frame (ip, cs, flags,
	 * sp, ss) at regs->sp; rebuild a pt_regs for the #GP handler on
	 * the normal kernel stack and redirect there.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/* Copy the hardware iret frame the failed IRET was consuming. */
		gpregs->ip = p[0];
		gpregs->cs = p[1];
		gpregs->flags = p[2];
		gpregs->sp = p[3];
		gpregs->ss = p[4];
		gpregs->orig_ax = 0;	/* no error code for the synthesized #GP */

		/*
		 * Resume at the #GP entry point with the stack set up as
		 * if the #GP had happened directly; returning from here
		 * "returns" into asm_exc_general_protection.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	idtentry_enter_nmi(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * Heuristic: if CR2 points just below the task stack (i.e. into
	 * the guard page), this #DF was a kernel stack overflow.  Report
	 * it as such; handle_stack_overflow() does not return.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
					regs, address);
	}
#endif

	/* No recovery possible: report and halt. */
	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}
464
/* #BR - Bound Range Exceeded: fatal in kernel mode, SIGSEGV for user. */
DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}
479
/*
 * Classification of the address hint derived from a kernel #GP:
 * no usable hint, a probably-non-canonical access, or a canonical
 * ("maybe for address") guess.
 */
enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};
485
486
487
488
489
490
/*
 * Decode the instruction at regs->ip and try to recover the memory
 * address it was accessing when it raised #GP.  On success the address
 * is stored in *addr and the return value says whether it looks
 * non-canonical (x86-64 only) or merely plausible.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;

	/* Faulting text may itself be unmapped; copy it out safely. */
	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	kernel_insn_init(&insn, insn_buf, MAX_INSN_SIZE);
	insn_get_modrm(&insn);
	insn_get_sib(&insn);

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * An access is non-canonical when it starts below the canonical
	 * hole but its last byte (addr + operand size - 1) crosses above
	 * the virtual-address mask.
	 */
	if (*addr < ~__VIRTUAL_MASK &&
		*addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
522
523#define GPFSTR "general protection fault"
524
/*
 * #GP - General Protection Fault.
 *
 * Handles UMIP emulation for user mode, vm86 forwarding, exception
 * fixups for kernel mode, kprobe fault handling, and finally either a
 * SIGSEGV (user) or die_addr() with a decoded address hint (kernel).
 */
DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	/* desc is sized to hold GPFSTR plus the hint text and an address. */
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	/* UMIP: emulate the instruction instead of faulting userspace. */
	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	/* Kernel mode: exception-table fixup first. */
	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * Give a kprobe fault handler a chance to claim the fault before
	 * dying; only valid when not preemptible with a kprobe running.
	 */
	if (!preemptible() &&
		kprobe_running() &&
		kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	/* A non-zero error code means a segment-related #GP; no address hint. */
	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * Only pass a confident (non-canonical) address on to die_addr();
	 * for anything else report 0.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}
600
/*
 * Core #BP handling shared by user and kernel paths: offer the trap to
 * kgdb, kprobes and the die-notifier chain.  Returns true when one of
 * them consumed it.
 */
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
619
620static void do_int3_user(struct pt_regs *regs)
621{
622 if (do_int3(regs))
623 return;
624
625 cond_local_irq_enable(regs);
626 do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
627 cond_local_irq_disable(regs);
628}
629
/*
 * #BP entry point (raw).  poke_int3_handler() must run before any
 * entry bookkeeping because text_poke may be patching the very code
 * the normal entry path would execute.
 */
DEFINE_IDTENTRY_RAW(exc_int3)
{
	if (poke_int3_handler(regs))
		return;

	/*
	 * User and kernel int3 take different entry/exit protocols:
	 * user mode uses the normal irqentry user path, kernel mode is
	 * treated NMI-like so it works from any context.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		bool irq_state = idtentry_enter_nmi(regs);
		instrumentation_begin();
		/* An unclaimed kernel int3 is fatal. */
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		idtentry_exit_nmi(regs, irq_state);
	}
}
662
663#ifdef CONFIG_X86_64
664
665
666
667
668
/*
 * Copy a pt_regs frame from an IST/entry stack onto the normal
 * per-CPU task stack (just below its top) and return the new location.
 * No-op if the frame is already there.
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
676
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * For an SEV-ES #VC exception taken on the IST stack: pick a safe stack
 * to continue on, copy the pt_regs frame there and return the new frame
 * pointer.  Choices, in order: the task stack when the fault hit the
 * SYSCALL entry window, the interrupted stack when it is a known-good
 * kernel stack, otherwise the VC2 fallback IST stack.
 */
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * Faults inside the SYSCALL entry region (before the stack is set
	 * up) must continue on the task stack.
	 */
	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
		regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	/*
	 * Otherwise stay on the interrupted stack if it is a valid,
	 * non-entry, non-exception stack; fall back to the VC2 IST stack
	 * when it is not.
	 */
	sp = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type >= STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Reserve an aligned pt_regs frame on the chosen stack and copy
	 * the register state over.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif
720
/*
 * Layout used by fixup_bad_iret(): the error_entry return address
 * followed by a full pt_regs frame.
 */
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};
725
/*
 * Called when IRET back to userspace faulted.  Rebuild a complete
 * bad_iret_stack on the task's hardware entry stack (just below sp0)
 * from two pieces: the 5-word hardware iret frame the CPU pushed at
 * s->regs.sp, and the already-saved registers in *s.  The copy goes
 * through a local temporary so the source is fully read before the
 * destination (which may overlap the current stack) is written.
 */
asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target (ip, cs, flags, sp, ss) to the temporary. */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the frame (everything before regs.ip). */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Publish the assembled frame on the entry stack. */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	/* The faulting IRET must have been returning to user mode. */
	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
752#endif
753
/*
 * Detect a single-step #DB that landed inside the SYSENTER entry
 * region, where TF can legitimately still be set.  The region symbols
 * differ per configuration; without SYSENTER support this is never
 * true.
 */
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
776
/*
 * Read DR6 and reset it so a nested/subsequent #DB does not see stale
 * status bits.  DR6 reserved bits read as 1, so writing DR6_RESERVED
 * clears the status bits, and XORing the read value with DR6_RESERVED
 * strips those always-set bits from the result.
 */
static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED;	/* mask off the always-1 reserved bits */

	return dr6;
}
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
/*
 * Run the DIE_DEBUG notifier chain for a #DB.  dr6 is passed by
 * reference (cast through the err argument) so notifiers can clear
 * bits they handled.  Returns true when a notifier consumed the
 * exception entirely.
 */
static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}
838
/*
 * #DB taken from kernel mode.  Treated NMI-like; hardware breakpoints
 * are suspended for the duration (local_db_save/restore) to avoid
 * recursive #DBs.
 */
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Save and clear DR7 before entering the NMI-like section so a
	 * breakpoint cannot re-trigger while we handle this one.
	 */
	unsigned long dr7 = local_db_save();
	bool irq_state = idtentry_enter_nmi(regs);
	instrumentation_begin();

	/* This path must only be reached for kernel-mode #DB. */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * #DB clears DEBUGCTLMSR.BTF; re-set it here so
		 * block-stepping state survives the exception.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * A single-step trap inside the SYSENTER entry region is an
	 * artifact of userspace TF; ignore the step bit there.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/* Let kprobes consume its single-step traps. */
	if (kprobe_debug_handler(regs))
		goto out;

	/* Nothing left to report. */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * A leftover DR_STEP here is unexpected (the kernel is not
	 * supposed to single-step itself); clear TF so execution can
	 * continue instead of looping.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	idtentry_exit_nmi(regs, irq_state);

	local_db_restore(dr7);
}
915
/*
 * #DB taken from user mode: route to vm86 where applicable, otherwise
 * deliver SIGTRAP when a step/breakpoint/ICEBP condition is pending.
 */
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/* This path must only be reached for user-mode #DB. */
	WARN_ON_ONCE(!user_mode(regs));

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Stash the step bit as "virtual DR6" state for this thread; it
	 * is merged back in below after notifiers may have run.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/* #DB cleared DEBUGCTLMSR.BTF; drop the block-step flag to match. */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * An empty DR6 means the exception came from an ICEBP (INT1)
	 * instruction rather than a hardware debug condition.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* Merge the virtual step state back in before deciding on SIGTRAP. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
984
#ifdef CONFIG_X86_64
/*
 * 64-bit has separate IDT entry points for kernel- and user-mode #DB
 * (the kernel one runs on an IST stack); each reads and clears DR6.
 */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32-bit: a single raw entry point dispatches on the faulting mode. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif
1009
1010
1011
1012
1013
1014
/*
 * Common handling for x87 (#MF) and SIMD (#XF) floating-point
 * exceptions: kernel faults are fixed up or die(); user faults save
 * the FPU state, derive the si_code from it and deliver SIGFPE.
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
				SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the FPU register state so the exception status can be
	 * inspected to compute the signal code.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);

	/* A zero si_code means no signal should be raised (e.g. masked). */
	if (!si_code)
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
1056
/* #MF - x87 Floating-Point Exception. */
DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}
1061
/* #XF - SIMD Floating-Point Exception. */
DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/*
		 * CPUs without XMM support cannot genuinely raise #XF;
		 * treat such an event as a #GP instead (INVD erratum
		 * handling — see CONFIG_X86_INVD_BUG).
		 */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}
1073
/*
 * Spurious Interrupt Bug vector.  Intentionally an empty handler:
 * some CPUs can deliver this vector spuriously (historical hardware
 * erratum — the original comment block here was removed; verify
 * against upstream traps.c), and the only correct action is to ignore
 * it rather than panic.
 */
DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
}
1096
/*
 * #NM - Device Not Available.  Either run the x87 math emulator (no
 * FPU + CR0.EM), clear a stray CR0.TS, or die — the kernel does not
 * otherwise expect #NM.
 */
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	/* No FPU and emulation enabled: emulate the instruction. */
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* A set CR0.TS is unexpected here; warn, clear it and retry. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/* Neither emulation nor TS: this #NM has no valid cause. */
		die("unexpected #NM exception", regs, 0);
	}
}
1128
#ifdef CONFIG_X86_32
/*
 * 32-bit software entry for a faulting IRET: report SIGILL/ILL_BADSTK
 * unless a notifier consumes it.
 */
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif
1141
/*
 * Early boot setup of exception handling.  Ordering matters: the CPU
 * entry areas and #VC handling must exist before the IDT traps are
 * installed, and cpu_init() runs before the IST traps so the TSS/IST
 * stacks are set up first.
 */
void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up. */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages before first #VC handler (SEV-ES). */
	sev_es_init_vc_handling();

	idt_setup_traps();

	cpu_init();

	idt_setup_ist_traps();
}
1159