// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * Handle hardware traps and faults.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/context_tracking.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/uprobes.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/atomic.h>

#include <asm/stacktrace.h>
#include <asm/processor.h>
#include <asm/debugreg.h>
#include <asm/realmode.h>
#include <asm/text-patching.h>
#include <asm/ftrace.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/fpu/internal.h>
#include <asm/cpu.h>
#include <asm/cpu_entry_area.h>
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
#include <asm/alternative.h>
#include <asm/fpu/xstate.h>
#include <asm/vm86.h>
#include <asm/umip.h>
#include <asm/insn.h>
#include <asm/insn-eval.h>
#include <asm/vdso.h>

#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
#include <asm/proto.h>
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

DECLARE_BITMAP(system_vectors, NR_VECTORS);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_enable();
}

static inline void cond_local_irq_disable(struct pt_regs *regs)
{
	if (regs->flags & X86_EFLAGS_IF)
		local_irq_disable();
}
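
/*
 * Usage sketch (illustrative only): handlers that may sleep, take locks
 * or deliver signals bracket that work with the two helpers above, so
 * that the IF state of the interrupted context is preserved:
 *
 *	cond_local_irq_enable(regs);	// IRQs on only if they were on
 *	do_trap(...);			// work that may fault or sleep
 *	cond_local_irq_disable(regs);	// back to the entry state
 */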

__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, if the text isn't readable we would have gotten
	 * a different exception.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
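
/*
 * Background sketch, simplified from the BUG() machinery in
 * arch/x86/include/asm/bug.h: BUG() and WARN() plant a 'ud2' and record
 * file/line metadata in the __bug_table section, roughly:
 *
 *	asm volatile("1: ud2\n"
 *		     ".pushsection __bug_table, \"aw\"\n"
 *		     "2: .long 1b - 2b\n"	// bug_entry::bug_addr
 *		     "   .long %c0 - 2b\n"	// bug_entry::file
 *		     "   .word %c1\n"		// bug_entry::line
 *		     "   .word %c2\n"		// bug_entry::flags
 *		     ".popsection"
 *		     : : "i" (__FILE__), "i" (__LINE__), "i" (flags));
 *
 * report_bug() looks regs->ip up in that table; is_valid_bugaddr() above
 * only has to confirm that a kernel-space 'ud2' (INSN_UD2, the bytes
 * 0f 0b read as a u16) really is at the faulting address.
 */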

static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
		 * On nmi (interrupt 2), do_trap should not be called.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and
	 * kernelspace faults which result in die(), but not
	 * kernelspace faults which are fixed up.  die() gives the
	 * process no chance to handle the signal and notice the
	 * kernel fault information, so that won't result in polluting
	 * the information about previously queued, but not yet
	 * delivered, faults.  See also exc_general_protection below.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}

static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}

static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);

static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}

/*
 * Posix requires to provide the address of the faulting instruction for
 * SIGILL (#UD) and SIGFPE (#DE) in the si_addr member of siginfo_t.
 *
 * This address is usually regs->ip, but when an uprobe moved the code out
 * of line then regs->ip points to the XOL code which would confuse
 * anything which analyzes the fault address vs. the unmodified binary. If
 * a trap happened in XOL code then uprobe maps regs->ip back to the
 * original instruction address.
 */
static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
{
	return (void __user *)uprobe_get_trap_addr(regs);
}

DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}

DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}

#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}

static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.
	 */
	instrumentation_begin();
	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}

DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * We use UD2 as a debug facility in the kernel, so handle_bug()
	 * must run before irqentry_enter(): a WARN/BUG #UD can hit in
	 * noinstr code where RCU is not yet watching.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}

DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}

DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}
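
/*
 * Note on the user-mode path above: with split lock detection enabled
 * (see the split_lock_detect= command line option), a locked operation
 * whose operand straddles a cache line raises #AC at CPL 3 and lands in
 * the handler above. An illustrative user-space trigger (hypothetical
 * test snippet, not part of the kernel):
 *
 *	_Alignas(64) char buf[128];
 *	int *p = (int *)(buf + 62);	// straddles the boundary at buf+64
 *	__atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);	// locked RMW -> #AC
 */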

#ifdef CONFIG_VMAP_STACK
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s", message);
}
#endif

/*
 * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
 *
 * On x86_64, this is more or less a normal kernel entry.  Notwithstanding
 * the SDM's warnings about double faults being unrecoverable, returning
 * works as expected.
 *
 * Two special cases are handled below: an espfix64 IRET fault, which is
 * rewritten into a synthetic #GP so the #GP handler reports it with the
 * correct user context, and (with CONFIG_VMAP_STACK) a kernel stack
 * overflow into the guard page, which gets a dedicated OOPS message.
 */
DEFINE_IDTENTRY_DF(exc_double_fault)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;

#ifdef CONFIG_VMAP_STACK
	unsigned long address = read_cr2();
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, then we
	 * end up promoting it to a double fault.  In that case, take
	 * advantage of the fact that we're not using the normal (TSS.sp0)
	 * stack right now.  We can write a fake #GP(0) frame on TSS.sp0 to
	 * redirect us to the GP fault handler.
	 *
	 * The net result is that our #GP handler will think that we
	 * entered from usermode with the bad user context.
	 *
	 * No need for nmi_enter() here because we don't use RCU.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
		unsigned long *p = (unsigned long *)regs->sp;

		/*
		 * regs->sp points to the failing IRET frame on the
		 * ESPFIX64 stack.  Copy it to the entry stack.  This fills
		 * in gpregs->ss through gpregs->ip.
		 */
		gpregs->ip = p[0];
		gpregs->cs = p[1];
		gpregs->flags = p[2];
		gpregs->sp = p[3];
		gpregs->ss = p[4];
		gpregs->orig_ax = 0;	/* Missing (lost) #GP error code */

		/*
		 * Adjust our frame so that we return straight to the #GP
		 * vector with the expected RSP value.  This is safe because
		 * we won't enable interrupts or schedule before we invoke
		 * general_protection, so nothing will clobber the stack
		 * frame we just set up.
		 *
		 * We will enter general_protection with kernel GSBASE,
		 * which is what the stub expects, given that the faulting
		 * RIP will be the IRET instruction.
		 */
		regs->ip = (unsigned long)asm_exc_general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	irqentry_nmi_enter(regs);
	instrumentation_begin();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * If we overflow the stack into a guard page, the CPU will fail
	 * to deliver #PF and will send #DF instead.  Similarly, if we
	 * take any non-IST exception while too close to the bottom of
	 * the stack, the processor will get a page fault while
	 * delivering the exception and will generate a double fault.
	 *
	 * According to the SDM (footnote in 6.15 under "Interrupt 14 -
	 * Page-Fault Exception (#PF)"):
	 *
	 *   Processors update CR2 whenever a page fault is detected. If a
	 *   second page fault occurs while an earlier page fault is being
	 *   delivered, the faulting linear address of the second fault will
	 *   overwrite the contents of CR2 (replacing the previous
	 *   address). These updates to CR2 occur even if the page fault
	 *   results in a double fault or occurs during the delivery of a
	 *   double fault.
	 *
	 * The logic below has a small possibility of incorrectly diagnosing
	 * some errors as stack overflows.  For example, if the IDT or GDT
	 * gets corrupted such that #GP delivery fails, and we happen to be
	 * handling that #GP while CR2 coincidentally points at the stack
	 * guard page, we'll think we overflowed the stack.  Given that
	 * we're going to panic one way or another if this happens, this
	 * isn't necessarily worth fixing.
	 *
	 * If necessary, the test could be made more precise by only
	 * diagnosing a stack overflow when the saved RSP is too close to
	 * the bottom of the stack for exception delivery (SS, RSP, RFLAGS,
	 * CS, RIP and a possible error code) to succeed; with any less
	 * space left, delivery fails and, as a practical matter, the stack
	 * has overflowed even if the actual trigger for the double fault
	 * was something else.
	 */
	if ((unsigned long)task_stack_page(tsk) - 1 - address < PAGE_SIZE) {
		handle_stack_overflow("kernel stack overflow (double-fault)",
				      regs, address);
	}
#endif

	pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
	die("double fault", regs, error_code);
	panic("Machine halted.");
	instrumentation_end();
}

DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}

enum kernel_gp_hint {
	GP_NO_HINT,
	GP_NON_CANONICAL,
	GP_CANONICAL
};

/*
 * When an uncaught #GP occurs, try to determine the memory address the
 * faulting instruction was accessing and return it to the caller, along
 * with a hint whether any part of that access was non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * Check that:
	 *  - the operand is not in the kernel half
	 *  - the last byte of the operand is not in the user canonical half
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
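
/*
 * Worked example for the range check above, assuming 4-level paging
 * (__VIRTUAL_MASK == 0x00007fffffffffff): a 4-byte access at
 * *addr == 0x00007ffffffffffe ends at 0x0000800000000001, which is
 * above __VIRTUAL_MASK while *addr itself is below ~__VIRTUAL_MASK
 * (0xffff800000000000). The access therefore straddles the start of
 * the non-canonical hole and GP_NON_CANONICAL is returned.
 */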

#define GPFSTR "general protection fault"

DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * To be potentially processing a kprobe fault and to trust the result
	 * from kprobe_running(), we have to be non-preemptible.
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * KASAN is interested only in the non-canonical case, clear it
	 * otherwise.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}

static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}

static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}

DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
	 * and therefore can trigger INT3, hence poke_int3_handler() must
	 * be done before. If the entry came from kernel mode, then use
	 * nmi_enter() because the INT3 could have been hit in any context
	 * including NMI.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}

#ifdef CONFIG_X86_64
/*
 * Help handler running on a per-cpu (IST or entry trampoline) stack
 * to switch to the normal thread stack if the interrupted code was in
 * user mode. The actual stack switch is done in entry_64.S
 */
asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}

#ifdef CONFIG_AMD_MEM_ENCRYPT
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the SYSCALL entry path the RSP value comes from user-space - don't
	 * trust it and switch to the current kernel stack
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	/*
	 * From here on the RSP value is trusted. Now check whether entry
	 * happened from a safe stack. Not safe are the entry or unknown stacks,
	 * use the fall-back stack instead in this case.
	 */
	sp    = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type >= STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Found a safe stack - switch to it as if the entry didn't happen via
	 * IST stack. The code below only copies pt_regs, the real switch happens
	 * in assembly code.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
#endif

struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};

asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/*
	 * This is called from entry_64.S early in handling a fault
	 * caused by a bad iret to user mode.  To handle the fault
	 * correctly, we want to move our stack frame to where it would
	 * be had we entered directly on the entry stack (rather than
	 * just below the IRET frame) and we want to pretend that the
	 * exception came from the IRET target.
	 */
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target to the temporary storage. */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the stack from the current stack. */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Update the entry stack */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
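
/*
 * Layout sketch for the copies above: the 5*8 bytes copied from
 * s->regs.sp are the hardware IRET frame that the failing IRET was
 * consuming, in pt_regs order from ->ip onwards:
 *
 *	p[0] ip, p[1] cs, p[2] flags, p[3] sp, p[4] ss
 *
 * which is why the destination is &tmp.regs.ip; the second memcpy then
 * fills in everything below ->ip from the current (bad) stack.
 */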
#endif

static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * We don't try for precision here.  If we're anywhere in the region of
	 * code that can be single-stepped in the SYSENTER entry path, then
	 * assume that this is a useless single-step trap due to SYSENTER
	 * being invoked with TF set.  (We don't know in advance exactly
	 * which instructions will be hit because BTF could plausibly
	 * be set.)
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}

static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	/*
	 * The Intel SDM says:
	 *
	 *   Certain debug exceptions may clear bits 0-3. The remaining
	 *   contents of the DR6 register are never cleared by the
	 *   processor. To avoid confusion in identifying debug
	 *   exceptions, debug handlers should clear the register
	 *   (except bit 16, which they set) before returning to the
	 *   interrupted task.
	 *
	 * Keep it simple: clear DR6 immediately.
	 */
	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}
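
/*
 * Worked example of the polarity flip above: DR6_RESERVED is 0xffff0ff0
 * (bits that read as 1 regardless of events). If a breakpoint in DR0
 * fires together with a single-step trap, the raw read might be
 * 0xffff4ff1; XORing with DR6_RESERVED yields 0x4001, i.e.
 * DR_STEP | DR_TRAP0, so only positive-polarity event bits remain for
 * the handlers below.
 */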

/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 *
 * May run on IST stack.
 */
static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * Notifiers will clear bits in @dr6 to indicate the event has been
	 * consumed - hw_breakpoint_handler(), single_stop_cont().
	 *
	 * Notifiers will set bits in @virtual_dr6 to indicate the desire
	 * for signals - ptrace_triggered(), kgdb_hw_overflow_handler().
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}

static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	/*
	 * Disable breakpoints during exception handling; recursive exceptions
	 * are exceedingly 'fun'.
	 *
	 * Since this function is NOKPROBE, and that also applies to
	 * HW_BREAKPOINT_X, we can't hit a breakpoint before this (except
	 * possibly a data breakpoint on our stack).
	 *
	 * Entry text is excluded for HW_BP_X and cpu_entry_area, which
	 * includes the entry stack, is excluded for everything.
	 */
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/*
	 * If something gets miswired and we end up here for a user mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The SDM says "The processor clears the BTF flag when it
		 * generates a debug exception." PTRACE_BLOCKSTEP requested
		 * it for userspace, but we just took a kernel #DB, so re-set
		 * BTF.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * Catch SYSENTER with TF set and clear DR_STEP. If this hit a
	 * watchpoint at the same time then that will still be handled.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/*
	 * The kernel doesn't use INT1
	 */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * The kernel doesn't use TF single-step outside of:
	 *
	 *  - kprobes, consumed through kprobe_debug_handler()
	 *  - KGDB, consumed through notify_debug()
	 *
	 * So if we get here with DR_STEP set, something is wonky.
	 *
	 * A known way to trigger this is through QEMU's GDB stub,
	 * which leaks #DB into the guest and causes IST recursion.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}

static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/*
	 * If something gets miswired and we end up here for a kernel mode
	 * #DB, we will malfunction.
	 */
	WARN_ON_ONCE(!user_mode(regs));

	/*
	 * NB: We can't easily clear DR7 here because
	 * irqentry_exit_to_user_mode() can invoke ptrace, schedule, access
	 * user memory, etc.  This means that a recursive #DB is possible.  If
	 * this happens, that #DB will hit exc_debug_kernel() and clear DR7.
	 * Since we're not on the IST stack right now, everything will be
	 * fine.
	 */
	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Start the virtual/ptrace DR6 value with just the DR_STEP mask
	 * of the real DR6. ptrace_triggered() will set the DR_TRAPn bits.
	 *
	 * Userspace expects DR_STEP to be visible in ptrace_get_debugreg(6)
	 * even if it is not related to PTRACE_SINGLESTEP.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/*
	 * The SDM says "The processor clears the BTF flag when it
	 * generates a debug exception."  Clear TIF_BLOCKSTEP to keep
	 * TIF_BLOCKSTEP in sync with the hardware BTF flag.
	 */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * If dr6 has no reason to give us about the origin of this trap,
	 * then it's very likely the result of an icebp/int01 trap.
	 * User wants a SIGTRAP for that.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	/* It's safe to allow irq's after DR6 has been saved */
	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB for bus lock can only be triggered from userspace. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Add the virtual_dr6 bits for signals. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}

#ifdef CONFIG_X86_64
/* IST stack entry */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}

/* User entry, runs on regular task stack */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
#else
/* 32 bit does not have separate entry points. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
#endif

/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/*
	 * Save the info for the exception handler and clear the error.
	 */
	fpu__save(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);
	/* Retry when we get spurious exceptions: */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
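
/*
 * For reference, an illustrative user-space trigger for this path (a
 * hypothetical test program, not part of the kernel): unmasking FP
 * exceptions makes the next faulting SSE operation raise #XF, which
 * arrives via exc_simd_coprocessor_error() below and is delivered as
 * SIGFPE with si_code FPE_FLTDIV:
 *
 *	#define _GNU_SOURCE
 *	#include <fenv.h>
 *
 *	feenableexcept(FE_DIVBYZERO);
 *	volatile double zero = 0.0;
 *	double r = 1.0 / zero;		// -> SIGFPE (FPE_FLTDIV)
 */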

DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}

DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}

DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
	/*
	 * This addresses a Pentium Pro Erratum:
	 *
	 * PROBLEM: If the APIC subsystem is configured in mixed mode with
	 * Virtual Wire mode implemented through the local APIC, an
	 * interrupt vector of 0Fh (Intel reserved encoding) may be
	 * generated by the local APIC (Int 15).  This vector may be
	 * generated upon receipt of a spurious interrupt (an interrupt
	 * which is removed before the system receives the INTA sequence)
	 * instead of the programmed 8259 spurious interrupt vector.
	 *
	 * IMPLICATION: The spurious interrupt vector programmed in the
	 * 8259 is normally handled by an operating system's spurious
	 * interrupt handler. However, a vector of 0Fh is unknown to some
	 * operating systems, which would crash if this erratum occurred.
	 *
	 * In theory this could be limited to 32bit, but the handler is not
	 * hurting and who knows which other CPUs suffer from this.
	 */
}

DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something terrible happened, and we're better off trying
		 * to kill the task than getting stuck in a never-ending
		 * loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, 0);
	}
}

#ifdef CONFIG_X86_32
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
#endif

void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest */
	sev_es_init_vc_handling();

	idt_setup_traps();

	/*
	 * Should be a barrier for any external CPU state:
	 */
	cpu_init();

	idt_setup_ist_traps();
}