1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/context_tracking.h>
16#include <linux/interrupt.h>
17#include <linux/kallsyms.h>
18#include <linux/spinlock.h>
19#include <linux/kprobes.h>
20#include <linux/uaccess.h>
21#include <linux/kdebug.h>
22#include <linux/kgdb.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ptrace.h>
26#include <linux/uprobes.h>
27#include <linux/string.h>
28#include <linux/delay.h>
29#include <linux/errno.h>
30#include <linux/kexec.h>
31#include <linux/sched.h>
32#include <linux/sched/task_stack.h>
33#include <linux/timer.h>
34#include <linux/init.h>
35#include <linux/bug.h>
36#include <linux/nmi.h>
37#include <linux/mm.h>
38#include <linux/smp.h>
39#include <linux/io.h>
40#include <linux/hardirq.h>
41#include <linux/atomic.h>
42
43#include <asm/stacktrace.h>
44#include <asm/processor.h>
45#include <asm/debugreg.h>
46#include <asm/realmode.h>
47#include <asm/text-patching.h>
48#include <asm/ftrace.h>
49#include <asm/traps.h>
50#include <asm/desc.h>
51#include <asm/fpu/api.h>
52#include <asm/cpu.h>
53#include <asm/cpu_entry_area.h>
54#include <asm/mce.h>
55#include <asm/fixmap.h>
56#include <asm/mach_traps.h>
57#include <asm/alternative.h>
58#include <asm/fpu/xstate.h>
59#include <asm/vm86.h>
60#include <asm/umip.h>
61#include <asm/insn.h>
62#include <asm/insn-eval.h>
63#include <asm/vdso.h>
64
65#ifdef CONFIG_X86_64
66#include <asm/x86_init.h>
67#include <asm/proto.h>
68#else
69#include <asm/processor-flags.h>
70#include <asm/setup.h>
71#include <asm/proto.h>
72#endif
73
/* Bitmap tracking which IDT vectors are in use. */
DECLARE_BITMAP(system_vectors, NR_VECTORS);
75
76static inline void cond_local_irq_enable(struct pt_regs *regs)
77{
78 if (regs->flags & X86_EFLAGS_IF)
79 local_irq_enable();
80}
81
82static inline void cond_local_irq_disable(struct pt_regs *regs)
83{
84 if (regs->flags & X86_EFLAGS_IF)
85 local_irq_disable();
86}
87
/*
 * Check whether @addr could be a BUG()/WARN() site: it must be a kernel
 * address (>= TASK_SIZE_MAX) and the instruction there must be UD2.
 */
__always_inline int is_valid_bugaddr(unsigned long addr)
{
	if (addr < TASK_SIZE_MAX)
		return 0;

	/*
	 * We got #UD, so the text at regs->ip was executable; it is safe
	 * to dereference the address directly here rather than using a
	 * fault-tolerant copy.
	 */
	return *(unsigned short *)addr == INSN_UD2;
}
99
/*
 * Common trap handling that does not raise a signal.
 *
 * Returns 0 if the trap was fully handled (vm86 forwarded it, an
 * exception fixup applied, or the vDSO fixup applied); returns -1 if the
 * caller should go on to deliver a signal to the task.
 */
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Low trap numbers (< #UD) are forwarded to the vm86
		 * monitor; if it handles the trap we are done.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, error_code, 0))
			return 0;

		/* Unhandled kernel-mode trap: record state and die. */
		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * Record error_code and trap_nr for userspace faults so that
	 * the subsequent signal delivery can report them.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
140
/*
 * Rate-limited diagnostic for signals that the task has no handler for,
 * gated on the show_unhandled_signals sysctl.
 */
static void show_signal(struct task_struct *tsk, int signr,
			const char *type, const char *desc,
			struct pt_regs *regs, long error_code)
{
	if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
	    printk_ratelimit()) {
		pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
			tsk->comm, task_pid_nr(tsk), type, desc,
			regs->ip, regs->sp, error_code);
		print_vma_addr(KERN_CONT " in ", regs->ip);
		pr_cont("\n");
	}
}
154
/*
 * Generic trap handler: try the no-signal paths first, then log and
 * deliver the appropriate signal to the current task.  @sicode of 0
 * selects a plain force_sig(); otherwise a fault siginfo with @addr.
 */
static void
do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
	long error_code, int sicode, void __user *addr)
{
	struct task_struct *tsk = current;

	if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
		return;

	show_signal(tsk, signr, "trap ", str, regs, error_code);

	if (!sicode)
		force_sig(signr);
	else
		force_sig_fault(signr, sicode, addr);
}
NOKPROBE_SYMBOL(do_trap);
172
/*
 * Wrapper around do_trap() that first gives the die notifier chain a
 * chance to swallow the trap, and conditionally enables interrupts
 * around the signal delivery.
 */
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
			  unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
		cond_local_irq_disable(regs);
	}
}
185
186
187
188
189
190
191
192
193
194
195
196static __always_inline void __user *error_get_trap_addr(struct pt_regs *regs)
197{
198 return (void __user *)uprobe_get_trap_addr(regs);
199}
200
/* #DE - divide error: delivers SIGFPE/FPE_INTDIV. */
DEFINE_IDTENTRY(exc_divide_error)
{
	do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
		      FPE_INTDIV, error_get_trap_addr(regs));
}
206
/* #OF - overflow (INTO instruction): delivers SIGSEGV. */
DEFINE_IDTENTRY(exc_overflow)
{
	do_error_trap(regs, 0, "overflow", X86_TRAP_OF, SIGSEGV, 0, NULL);
}
211
/*
 * #UD - invalid opcode: delivers SIGILL/ILL_ILLOPN.  Exported (non-static)
 * when the F00F bug workaround needs to call it from the fault path.
 */
#ifdef CONFIG_X86_F00F_BUG
void handle_invalid_op(struct pt_regs *regs)
#else
static inline void handle_invalid_op(struct pt_regs *regs)
#endif
{
	do_error_trap(regs, 0, "invalid opcode", X86_TRAP_UD, SIGILL,
		      ILL_ILLOPN, error_get_trap_addr(regs));
}
221
/*
 * Handle a #UD that originated from a BUG()/WARN() UD2 site.
 *
 * Returns true when report_bug() classified the site as a WARN, in which
 * case the UD2 is skipped and execution continues.  The function is
 * noinstr; all instrumentable work is bracketed explicitly.
 */
static noinstr bool handle_bug(struct pt_regs *regs)
{
	bool handled = false;

	if (!is_valid_bugaddr(regs->ip))
		return handled;

	/*
	 * All lies, just get the WARN/BUG out.  Instrumentation is allowed
	 * from here on.
	 */
	instrumentation_begin();

	/*
	 * Since we're emulating a CALL with exceptions, restore the interrupt
	 * state to what it was at the exception site.
	 */
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_enable();
	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
		regs->ip += LEN_UD2;
		handled = true;
	}
	if (regs->flags & X86_EFLAGS_IF)
		raw_local_irq_disable();
	instrumentation_end();

	return handled;
}
249
DEFINE_IDTENTRY_RAW(exc_invalid_op)
{
	irqentry_state_t state;

	/*
	 * Handle kernel BUG()/WARN() UD2 sites *before* the regular entry
	 * work (irqentry_enter()), so that a WARN inside the entry code
	 * itself cannot recurse through the entry path.
	 */
	if (!user_mode(regs) && handle_bug(regs))
		return;

	state = irqentry_enter(regs);
	instrumentation_begin();
	handle_invalid_op(regs);
	instrumentation_end();
	irqentry_exit(regs, state);
}
268
/* #OLD_MF - coprocessor segment overrun (legacy): delivers SIGFPE. */
DEFINE_IDTENTRY(exc_coproc_segment_overrun)
{
	do_error_trap(regs, 0, "coprocessor segment overrun",
		      X86_TRAP_OLD_MF, SIGFPE, 0, NULL);
}
274
/* #TS - invalid TSS: delivers SIGSEGV. */
DEFINE_IDTENTRY_ERRORCODE(exc_invalid_tss)
{
	do_error_trap(regs, error_code, "invalid TSS", X86_TRAP_TS, SIGSEGV,
		      0, NULL);
}
280
/* #NP - segment not present: delivers SIGBUS. */
DEFINE_IDTENTRY_ERRORCODE(exc_segment_not_present)
{
	do_error_trap(regs, error_code, "segment not present", X86_TRAP_NP,
		      SIGBUS, 0, NULL);
}
286
/* #SS - stack segment fault: delivers SIGBUS. */
DEFINE_IDTENTRY_ERRORCODE(exc_stack_segment)
{
	do_error_trap(regs, error_code, "stack segment", X86_TRAP_SS, SIGBUS,
		      0, NULL);
}
292
/*
 * #AC - alignment check.  In kernel mode this can only be a split-lock
 * trap (the kernel never runs with EFLAGS.AC set for data alignment), so
 * a kernel-mode #AC dies.  User mode gets split-lock handling first, then
 * a SIGBUS/BUS_ADRALN.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_alignment_check)
{
	char *str = "alignment check";

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	if (!user_mode(regs))
		die("Split lock detected\n", regs, error_code);

	local_irq_enable();

	if (handle_user_split_lock(regs, error_code))
		goto out;

	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);

out:
	local_irq_disable();
}
314
#ifdef CONFIG_VMAP_STACK
/*
 * Report a hit on a vmap'd stack guard page and halt.  Never returns:
 * the stack is unusable, so even if die() were to return, we panic.
 */
__visible void __noreturn handle_stack_overflow(struct pt_regs *regs,
						unsigned long fault_address,
						struct stack_info *info)
{
	const char *name = stack_type_name(info->type);

	printk(KERN_EMERG "BUG: %s stack guard page was hit at %p (stack is %p..%p)\n",
	       name, (void *)fault_address, info->begin, info->end);

	die("stack guard page", regs, 0);

	/* Be absolutely certain we don't return. */
	panic("%s stack guard hit", name);
}
#endif
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351DEFINE_IDTENTRY_DF(exc_double_fault)
352{
353 static const char str[] = "double fault";
354 struct task_struct *tsk = current;
355
356#ifdef CONFIG_VMAP_STACK
357 unsigned long address = read_cr2();
358 struct stack_info info;
359#endif
360
361#ifdef CONFIG_X86_ESPFIX64
362 extern unsigned char native_irq_return_iret[];
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378 if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
379 regs->cs == __KERNEL_CS &&
380 regs->ip == (unsigned long)native_irq_return_iret)
381 {
382 struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
383 unsigned long *p = (unsigned long *)regs->sp;
384
385
386
387
388
389
390
391 gpregs->ip = p[0];
392 gpregs->cs = p[1];
393 gpregs->flags = p[2];
394 gpregs->sp = p[3];
395 gpregs->ss = p[4];
396 gpregs->orig_ax = 0;
397
398
399
400
401
402
403
404
405
406
407
408
409 regs->ip = (unsigned long)asm_exc_general_protection;
410 regs->sp = (unsigned long)&gpregs->orig_ax;
411
412 return;
413 }
414#endif
415
416 irqentry_nmi_enter(regs);
417 instrumentation_begin();
418 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
419
420 tsk->thread.error_code = error_code;
421 tsk->thread.trap_nr = X86_TRAP_DF;
422
423#ifdef CONFIG_VMAP_STACK
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461 if (get_stack_guard_info((void *)address, &info))
462 handle_stack_overflow(regs, address, &info);
463#endif
464
465 pr_emerg("PANIC: double fault, error_code: 0x%lx\n", error_code);
466 die("double fault", regs, error_code);
467 panic("Machine halted.");
468 instrumentation_end();
469}
470
/* #BR - bound range exceeded: kernel mode dies, user mode gets SIGSEGV. */
DEFINE_IDTENTRY(exc_bounds)
{
	if (notify_die(DIE_TRAP, "bounds", regs, 0,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	if (!user_mode(regs))
		die("bounds", regs, 0);

	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);

	cond_local_irq_disable(regs);
}
485
/* Classification of the memory operand behind a kernel-mode #GP. */
enum kernel_gp_hint {
	GP_NO_HINT,		/* operand address could not be determined */
	GP_NON_CANONICAL,	/* access straddles the canonical boundary */
	GP_CANONICAL		/* address decoded, looks canonical */
};
491
492
493
494
495
496
/*
 * Decode the instruction at regs->ip and, if possible, compute the memory
 * operand address that caused a kernel-mode #GP.  On success *addr is set
 * and the return value says whether it looks non-canonical.
 */
static enum kernel_gp_hint get_kernel_gp_address(struct pt_regs *regs,
						 unsigned long *addr)
{
	u8 insn_buf[MAX_INSN_SIZE];
	struct insn insn;
	int ret;

	/* Fault-tolerant copy: ip may point at unmapped text. */
	if (copy_from_kernel_nofault(insn_buf, (void *)regs->ip,
			MAX_INSN_SIZE))
		return GP_NO_HINT;

	ret = insn_decode_kernel(&insn, insn_buf);
	if (ret < 0)
		return GP_NO_HINT;

	*addr = (unsigned long)insn_get_addr_ref(&insn, regs);
	if (*addr == -1UL)
		return GP_NO_HINT;

#ifdef CONFIG_X86_64
	/*
	 * The access is non-canonical if it starts below the canonical
	 * boundary but (including its operand width) extends past it.
	 */
	if (*addr < ~__VIRTUAL_MASK &&
	    *addr + insn.opnd_bytes - 1 > __VIRTUAL_MASK)
		return GP_NON_CANONICAL;
#endif

	return GP_CANONICAL;
}
529
/* Base message for #GP diagnostics; suffixes are appended below. */
#define GPFSTR "general protection fault"
531
532static bool fixup_iopl_exception(struct pt_regs *regs)
533{
534 struct thread_struct *t = ¤t->thread;
535 unsigned char byte;
536 unsigned long ip;
537
538 if (!IS_ENABLED(CONFIG_X86_IOPL_IOPERM) || t->iopl_emul != 3)
539 return false;
540
541 if (insn_get_effective_ip(regs, &ip))
542 return false;
543
544 if (get_user(byte, (const char __user *)ip))
545 return false;
546
547 if (byte != 0xfa && byte != 0xfb)
548 return false;
549
550 if (!t->iopl_warn && printk_ratelimit()) {
551 pr_err("%s[%d] attempts to use CLI/STI, pretending it's a NOP, ip:%lx",
552 current->comm, task_pid_nr(current), ip);
553 print_vma_addr(KERN_CONT " in ", ip);
554 pr_cont("\n");
555 t->iopl_warn = 1;
556 }
557
558 regs->ip += 1;
559 return true;
560}
561
/*
 * #GP - general protection fault.
 *
 * Tries, in order: UMIP emulation, vm86 handling, user-mode fixups
 * (IOPL emulation, vDSO) then SIGSEGV, kernel exception-table fixups,
 * kprobe fault handling, the die notifier chain, and finally die_addr()
 * with a decoded address hint when available.
 */
DEFINE_IDTENTRY_ERRORCODE(exc_general_protection)
{
	char desc[sizeof(GPFSTR) + 50 + 2*sizeof(unsigned long) + 1] = GPFSTR;
	enum kernel_gp_hint hint = GP_NO_HINT;
	struct task_struct *tsk;
	unsigned long gp_addr;
	int ret;

	cond_local_irq_enable(regs);

	if (static_cpu_has(X86_FEATURE_UMIP)) {
		/* Emulate SGDT/SIDT/etc. for user mode when UMIP traps them. */
		if (user_mode(regs) && fixup_umip_exception(regs))
			goto exit;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		local_irq_disable();
		return;
	}

	tsk = current;

	if (user_mode(regs)) {
		if (fixup_iopl_exception(regs))
			goto exit;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;

		if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
			goto exit;

		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
		force_sig(SIGSEGV);
		goto exit;
	}

	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
		goto exit;

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/*
	 * Give a registered kprobe fault handler a chance to claim the
	 * fault, but only when we are not preemptible (kprobe_running()
	 * relies on that).
	 */
	if (!preemptible() &&
	    kprobe_running() &&
	    kprobe_fault_handler(regs, X86_TRAP_GP))
		goto exit;

	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
	if (ret == NOTIFY_STOP)
		goto exit;

	if (error_code)
		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
	else
		hint = get_kernel_gp_address(regs, &gp_addr);

	if (hint != GP_NO_HINT)
		snprintf(desc, sizeof(desc), GPFSTR ", %s 0x%lx",
			 (hint == GP_NON_CANONICAL) ? "probably for non-canonical address"
						    : "maybe for address",
			 gp_addr);

	/*
	 * Only report a decoded address to die_addr() when it is known to
	 * be non-canonical; a canonical guess may be wrong.
	 */
	if (hint != GP_NON_CANONICAL)
		gp_addr = 0;

	die_addr(desc, regs, error_code, gp_addr);

exit:
	cond_local_irq_disable(regs);
}
643
/*
 * Core #BP handling: kgdb low-level trap, kprobes, then the die notifier
 * chain.  Returns true when one of those consumed the breakpoint.
 */
static bool do_int3(struct pt_regs *regs)
{
	int res;

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, 0, X86_TRAP_BP,
			 SIGTRAP) == NOTIFY_STOP)
		return true;
#endif

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		return true;
#endif
	res = notify_die(DIE_INT3, "int3", regs, 0, X86_TRAP_BP, SIGTRAP);

	return res == NOTIFY_STOP;
}
662
/* User-mode #BP: if not consumed by do_int3(), deliver SIGTRAP. */
static void do_int3_user(struct pt_regs *regs)
{
	if (do_int3(regs))
		return;

	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, 0, 0, NULL);
	cond_local_irq_disable(regs);
}
672
DEFINE_IDTENTRY_RAW(exc_int3)
{
	/*
	 * poke_int3_handler() is completely self contained code; it does (and
	 * must) *NOT* call out to anything, lest it hits upon yet another
	 * INT3 placed by the text-poking machinery.  It must run before any
	 * entry accounting.
	 */
	if (poke_int3_handler(regs))
		return;

	/*
	 * User-mode breakpoints take the normal user entry path; kernel
	 * breakpoints are treated NMI-like because #BP can hit anywhere,
	 * including entry code and regions where RCU/lockdep state is not
	 * yet set up.
	 */
	if (user_mode(regs)) {
		irqentry_enter_from_user_mode(regs);
		instrumentation_begin();
		do_int3_user(regs);
		instrumentation_end();
		irqentry_exit_to_user_mode(regs);
	} else {
		irqentry_state_t irq_state = irqentry_nmi_enter(regs);

		instrumentation_begin();
		if (!do_int3(regs))
			die("int3", regs, 0);
		instrumentation_end();
		irqentry_nmi_exit(regs, irq_state);
	}
}
706
707#ifdef CONFIG_X86_64
708
709
710
711
712
713asmlinkage __visible noinstr struct pt_regs *sync_regs(struct pt_regs *eregs)
714{
715 struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
716 if (regs != eregs)
717 *regs = *eregs;
718 return regs;
719}
720
721#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * #VC (SEV-ES) entry helper: move off the IST stack onto a safe stack
 * and return the relocated pt_regs.
 *
 * If the exception hit the syscall gap, use the task stack top; if the
 * interrupted stack cannot be identified or is an entry/unknown stack,
 * fall back to the dedicated VC2 IST stack; otherwise stay on the
 * interrupted stack (below regs->sp).
 */
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the syscall gap the user stack is still live in regs->sp, so
	 * it cannot be trusted: switch to the task stack.
	 */
	if (ip_within_syscall_gap(regs)) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	sp = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type > STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Reserve room for a pt_regs copy on the chosen stack, keeping it
	 * 8-byte aligned, then copy the register frame there.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
762#endif
763
/* Layout of the stack seen by fixup_bad_iret(): return address + regs. */
struct bad_iret_stack {
	void *error_entry_ret;
	struct pt_regs regs;
};
768
/*
 * Fix up the stack after IRET faulted returning to user space: rebuild a
 * complete bad_iret_stack at the top of the task stack (sp0) so the
 * error path can continue as if #GP had been taken from user mode.
 *
 * The copies are staged through a local 'tmp' first, because the source
 * and destination areas can overlap.
 */
asmlinkage __visible noinstr
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	struct bad_iret_stack tmp, *new_stack =
		(struct bad_iret_stack *)__this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target (ip, cs, flags, sp, ss) from s->regs.sp. */
	__memcpy(&tmp.regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the remainder of the frame (everything up to regs.ip). */
	__memcpy(&tmp, s, offsetof(struct bad_iret_stack, regs.ip));

	/* Now publish the assembled frame at its final location. */
	__memcpy(new_stack, &tmp, sizeof(tmp));

	/* The faulting IRET must have been returning to user mode. */
	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
795#endif
796
/*
 * Detect the single-step trap that can be generated inside the SYSENTER
 * entry region (SYSENTER does not clear TF), by checking whether
 * regs->ip lies within the SYSENTER single-step text range for this
 * configuration.
 */
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	return false;
#endif
}
819
/*
 * Read DR6 and reset it to its architectural quiescent value
 * (DR6_RESERVED), so stale status bits cannot leak into the next #DB.
 * The XOR strips the always-set reserved bits from the returned value,
 * leaving only the active status bits.
 */
static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to positive polarity */

	return dr6;
}
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
/*
 * Run the DIE_DEBUG notifier chain for a #DB.  Returns true when a
 * notifier consumed the event.  @dr6 is passed by reference (cast into
 * the error-code slot) so notifiers can clear bits they handled.
 */
static bool notify_debug(struct pt_regs *regs, unsigned long *dr6)
{
	/*
	 * NOTE(review): the cast passes the *pointer* to dr6 through the
	 * long 'err' argument; consumers retrieve and dereference it.
	 */
	if (notify_die(DIE_DEBUG, "debug", regs, (long)dr6, 0, SIGTRAP) == NOTIFY_STOP)
		return true;

	return false;
}
881
/*
 * Kernel-mode #DB handling.  Treated NMI-like (irqentry_nmi_enter)
 * because #DB can trigger anywhere; hardware breakpoints are disabled
 * (local_db_save) for the duration to avoid recursion.
 */
static __always_inline void exc_debug_kernel(struct pt_regs *regs,
					     unsigned long dr6)
{
	unsigned long dr7 = local_db_save();
	irqentry_state_t irq_state = irqentry_nmi_enter(regs);
	instrumentation_begin();

	/* This function is only reachable for kernel-mode traps. */
	WARN_ON_ONCE(user_mode(regs));

	if (test_thread_flag(TIF_BLOCKSTEP)) {
		/*
		 * The #DB cleared DEBUGCTLMSR.BTF; restore it so user-space
		 * block-stepping keeps working after we return.
		 */
		unsigned long debugctl;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl |= DEBUGCTLMSR_BTF;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	/*
	 * A single-step trap inside the SYSENTER entry region is an
	 * artifact of SYSENTER not clearing TF; ignore the DR_STEP bit.
	 */
	if ((dr6 & DR_STEP) && is_sysenter_singlestep(regs))
		dr6 &= ~DR_STEP;

	/* Nothing left to handle? */
	if (!dr6)
		goto out;

	if (notify_debug(regs, &dr6))
		goto out;

	/*
	 * A leftover DR_STEP in kernel mode is unexpected (notifiers
	 * should have consumed it); clear TF so we don't single-step
	 * forever, and warn.
	 */
	if (WARN_ON_ONCE(dr6 & DR_STEP))
		regs->flags &= ~X86_EFLAGS_TF;
out:
	instrumentation_end();
	irqentry_nmi_exit(regs, irq_state);

	local_db_restore(dr7);
}
955
/*
 * User-mode #DB handling: record virtual DR6 state for ptrace, handle
 * vm86 and bus-lock cases, and deliver SIGTRAP when appropriate.
 */
static __always_inline void exc_debug_user(struct pt_regs *regs,
					   unsigned long dr6)
{
	bool icebp;

	/* This function is only reachable for user-mode traps. */
	WARN_ON_ONCE(!user_mode(regs));

	irqentry_enter_from_user_mode(regs);
	instrumentation_begin();

	/*
	 * Stash the single-step bit where ptrace can see it; other bits
	 * are reported directly from dr6 further down.
	 */
	current->thread.virtual_dr6 = (dr6 & DR_STEP);

	/* The #DB consumed the pending block-step; drop the flag. */
	clear_thread_flag(TIF_BLOCKSTEP);

	/*
	 * An empty DR6 means the trap came from the INT1 instruction
	 * ("ICEBP"), which sets no status bits — still deliver SIGTRAP.
	 */
	icebp = !dr6;

	if (notify_debug(regs, &dr6))
		goto out;

	local_irq_enable();

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
		goto out_irq;
	}

	/* #DB with DR_BUS_LOCK set reports a user-space bus lock. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	/* Merge in ptrace-injected bits before deciding on SIGTRAP. */
	dr6 |= current->thread.virtual_dr6;
	if (dr6 & (DR_STEP | DR_TRAP_BITS) || icebp)
		send_sigtrap(regs, 0, get_si_code(dr6));

out_irq:
	local_irq_disable();
out:
	instrumentation_end();
	irqentry_exit_to_user_mode(regs);
}
1028
1029#ifdef CONFIG_X86_64
1030
/* 64-bit: kernel-mode #DB arrives via the IST entry stub. */
DEFINE_IDTENTRY_DEBUG(exc_debug)
{
	exc_debug_kernel(regs, debug_read_clear_dr6());
}
1035
1036
/* 64-bit: user-mode #DB arrives via the non-IST user entry stub. */
DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
{
	exc_debug_user(regs, debug_read_clear_dr6());
}
1041#else
1042
/* 32-bit: single #DB entry point, dispatched by interrupted mode. */
DEFINE_IDTENTRY_RAW(exc_debug)
{
	unsigned long dr6 = debug_read_clear_dr6();

	if (user_mode(regs))
		exc_debug_user(regs, dr6);
	else
		exc_debug_kernel(regs, dr6);
}
1052#endif
1053
1054
1055
1056
1057
1058
/*
 * Common handler for x87 (#MF) and SIMD (#XF) floating-point exceptions.
 * Kernel mode: try exception-table fixup, then die.  User mode: sync FPU
 * state, derive the FP si_code, and deliver SIGFPE (unless the vDSO
 * fixup applies or no exception bit is actually set).
 */
static void math_error(struct pt_regs *regs, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		if (fixup_exception(regs, trapnr, 0, 0))
			goto exit;

		task->thread.error_code = 0;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
			       SIGFPE) != NOTIFY_STOP)
			die(str, regs, 0);
		goto exit;
	}

	/* Save the FPU state so fpu__exception_code() can inspect it. */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = 0;

	si_code = fpu__exception_code(fpu, trapnr);

	/* Retry when we get spurious exceptions (no exception bit set). */
	if (!si_code)
		goto exit;

	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		goto exit;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs));
exit:
	cond_local_irq_disable(regs);
}
1104
/* #MF - x87 floating-point exception. */
DEFINE_IDTENTRY(exc_coprocessor_error)
{
	math_error(regs, X86_TRAP_MF);
}
1109
/*
 * #XF - SIMD floating-point exception.  On CPUs affected by the INVD
 * bug without XMM support, this vector actually carries a #GP.
 */
DEFINE_IDTENTRY(exc_simd_coprocessor_error)
{
	if (IS_ENABLED(CONFIG_X86_INVD_BUG)) {
		/* AMD 486 bug: INVD in CPL 0 raises #XF instead of #GP. */
		if (!static_cpu_has(X86_FEATURE_XMM)) {
			__exc_general_protection(regs, 0);
			return;
		}
	}
	math_error(regs, X86_TRAP_XF);
}
1121
/*
 * Vector 15 - intentionally empty.
 *
 * Some hardware can deliver a spurious trap on this vector; the handler
 * exists only so that the vector is not reported as unhandled.  Nothing
 * needs to be done.
 */
DEFINE_IDTENTRY(exc_spurious_interrupt_bug)
{
}
1144
/*
 * Handle an #NM caused by the eXtended Feature Disable (XFD) mechanism:
 * read and clear MSR_IA32_XFD_ERR, then attempt to enable the requested
 * dynamic feature for the current task.  Returns true when the #NM was
 * an XFD event (whether or not enabling succeeded).
 */
static bool handle_xfd_event(struct pt_regs *regs)
{
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	/* Clear the error MSR before anything else can re-trigger. */
	wrmsrl(MSR_IA32_XFD_ERR, 0);

	/* XFD faults are expected from user mode only. */
	if (WARN_ON(!user_mode(regs)))
		return false;

	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		force_sig_fault(SIGILL, ILL_ILLOPC, error_get_trap_addr(regs));
		break;
	case -EFAULT:
		force_sig(SIGSEGV);
		break;
	}

	local_irq_disable();
	return true;
}
1179
/*
 * #NM - device not available.  Possible sources, tried in order:
 * XFD dynamic-feature events, math emulation (no FPU with CR0.EM set),
 * and a stale CR0.TS.  Anything else is unexpected and fatal, since the
 * kernel uses eager FPU switching (no lazy #NM expected).
 */
DEFINE_IDTENTRY(exc_device_not_available)
{
	unsigned long cr0 = read_cr0();

	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	if (!boot_cpu_has(X86_FEATURE_FPU) && (cr0 & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);

		cond_local_irq_disable(regs);
		return;
	}
#endif

	/* This should not happen; clear TS if it was somehow set. */
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/* No reachable #NM source remains: die. */
		die("unexpected #NM exception", regs, 0);
	}
}
1214
1215#ifdef CONFIG_X86_32
/* 32-bit only: software-dispatched handler for faults during IRET. */
DEFINE_IDTENTRY_SW(iret_error)
{
	local_irq_enable();
	if (notify_die(DIE_TRAP, "iret exception", regs, 0,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, 0,
			ILL_BADSTK, (void __user *)NULL);
	}
	local_irq_disable();
}
1226#endif
1227
/*
 * Early boot trap setup.  Ordering matters: cpu_entry_area must exist
 * before IST entries are configured, and exception handling must be
 * initialized before the IDT traps are installed.
 */
void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up. */
	setup_cpu_entry_areas();

	/* Init GHCB memory pages when running as an SEV-ES guest. */
	sev_es_init_vc_handling();

	/* Initialize TSS/exception stacks before setting up the IDT. */
	cpu_init_exception_handling();

	idt_setup_traps();
	cpu_init();
}
1242