1
2
3
4
5
6
7
8
9
10
11
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/context_tracking.h>
16#include <linux/interrupt.h>
17#include <linux/kallsyms.h>
18#include <linux/spinlock.h>
19#include <linux/kprobes.h>
20#include <linux/uaccess.h>
21#include <linux/kdebug.h>
22#include <linux/kgdb.h>
23#include <linux/kernel.h>
24#include <linux/export.h>
25#include <linux/ptrace.h>
26#include <linux/uprobes.h>
27#include <linux/string.h>
28#include <linux/delay.h>
29#include <linux/errno.h>
30#include <linux/kexec.h>
31#include <linux/sched.h>
32#include <linux/sched/task_stack.h>
33#include <linux/timer.h>
34#include <linux/init.h>
35#include <linux/bug.h>
36#include <linux/nmi.h>
37#include <linux/mm.h>
38#include <linux/smp.h>
39#include <linux/io.h>
40
41#if defined(CONFIG_EDAC)
42#include <linux/edac.h>
43#endif
44#include <linux/hardirq.h>
45#include <linux/atomic.h>
46
47#include <asm/stacktrace.h>
48#include <asm/processor.h>
49#include <asm/debugreg.h>
50#include <asm/realmode.h>
51#include <asm/text-patching.h>
52#include <asm/ftrace.h>
53#include <asm/traps.h>
54#include <asm/desc.h>
55#include <asm/fpu/api.h>
56#include <asm/cpu.h>
57#include <asm/cpu_entry_area.h>
58#include <asm/mce.h>
59#include <asm/fixmap.h>
60#include <asm/mach_traps.h>
61#include <asm/alternative.h>
62#include <asm/fpu/xstate.h>
63#include <asm/trace/mpx.h>
64#include <asm/mpx.h>
65#include <asm/vm86.h>
66#include <asm/umip.h>
67#include <asm/vdso.h>
68
69#ifdef CONFIG_X86_64
70#include <asm/x86_init.h>
71#include <asm/pgalloc.h>
72#include <asm/proto.h>
73#else
74#include <asm/processor-flags.h>
75#include <asm/setup.h>
76#include <asm/proto.h>
77#endif
78
/* One bit per IDT vector; set for vectors in use (managed elsewhere — see IDT setup code). */
DECLARE_BITMAP(system_vectors, NR_VECTORS);
80
81static inline void cond_local_irq_enable(struct pt_regs *regs)
82{
83 if (regs->flags & X86_EFLAGS_IF)
84 local_irq_enable();
85}
86
87static inline void cond_local_irq_disable(struct pt_regs *regs)
88{
89 if (regs->flags & X86_EFLAGS_IF)
90 local_irq_disable();
91}
92
93int is_valid_bugaddr(unsigned long addr)
94{
95 unsigned short ud;
96
97 if (addr < TASK_SIZE_MAX)
98 return 0;
99
100 if (probe_kernel_address((unsigned short *)addr, ud))
101 return 0;
102
103 return ud == INSN_UD0 || ud == INSN_UD2;
104}
105
106int fixup_bug(struct pt_regs *regs, int trapnr)
107{
108 if (trapnr != X86_TRAP_UD)
109 return 0;
110
111 switch (report_bug(regs->ip, regs)) {
112 case BUG_TRAP_TYPE_NONE:
113 case BUG_TRAP_TYPE_BUG:
114 break;
115
116 case BUG_TRAP_TYPE_WARN:
117 regs->ip += LEN_UD2;
118 return 1;
119 }
120
121 return 0;
122}
123
/*
 * Common trap resolution that does not deliver a signal.  Returns 0 when
 * the trap was fully handled (vm86 forwarding, exception-table fixup or
 * vDSO fixup), -1 when the caller should go on to deliver a signal.
 */
static nokprobe_inline int
do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
		  struct pt_regs *regs, long error_code)
{
	if (v8086_mode(regs)) {
		/*
		 * Traps below #UD should be forwarded to the vm86 code;
		 * only fall through if that handler rejects the trap.
		 */
		if (trapnr < X86_TRAP_UD) {
			if (!handle_vm86_trap((struct kernel_vm86_regs *) regs,
						error_code, trapnr))
				return 0;
		}
	} else if (!user_mode(regs)) {
		/* Kernel-mode trap: try an exception-table fixup, else die. */
		if (fixup_exception(regs, trapnr))
			return 0;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = trapnr;
		die(str, regs, error_code);
	} else {
		/* User-mode trap: a vDSO fixup may swallow it entirely. */
		if (fixup_vdso_exception(regs, trapnr, error_code, 0))
			return 0;
	}

	/*
	 * We want error_code and trap_nr set for userspace faults and for
	 * kernelspace faults which result in die(), but not for kernel
	 * faults which were fixed up above — those must not pollute the
	 * information about previously queued, not yet delivered, faults.
	 */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = trapnr;

	return -1;
}
164
165static void show_signal(struct task_struct *tsk, int signr,
166 const char *type, const char *desc,
167 struct pt_regs *regs, long error_code)
168{
169 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
170 printk_ratelimit()) {
171 pr_info("%s[%d] %s%s ip:%lx sp:%lx error:%lx",
172 tsk->comm, task_pid_nr(tsk), type, desc,
173 regs->ip, regs->sp, error_code);
174 print_vma_addr(KERN_CONT " in ", regs->ip);
175 pr_cont("\n");
176 }
177}
178
179static void
180do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
181 long error_code, int sicode, void __user *addr)
182{
183 struct task_struct *tsk = current;
184
185 if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code))
186 return;
187
188 show_signal(tsk, signr, "trap ", str, regs, error_code);
189
190 if (!sicode)
191 force_sig(signr, tsk);
192 else
193 force_sig_fault(signr, sicode, addr, tsk);
194}
195NOKPROBE_SYMBOL(do_trap);
196
/*
 * Common body for the simple exception entry points generated by
 * DO_ERROR(): fix up kernel WARN()s, run the die-notifier chain, then
 * deliver the signal via do_trap().
 */
static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
	unsigned long trapnr, int signr, int sicode, void __user *addr)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/*
	 * WARN*()s end up here via the #UD vector; handle them before
	 * notifying anyone so a WARN is not reported as a fatal trap.
	 */
	if (!user_mode(regs) && fixup_bug(regs, trapnr))
		return;

	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
			NOTIFY_STOP) {
		cond_local_irq_enable(regs);
		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
	}
}
215
/* Fault address reported to user space: the uprobe-adjusted trap address. */
#define IP ((void __user *)uprobe_get_trap_addr(regs))
/*
 * Generate a trivial dotraplinkage entry point do_<name>() that funnels
 * the exception into do_error_trap() with fixed signal/si_code/address.
 */
#define DO_ERROR(trapnr, signr, sicode, addr, str, name)		   \
dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \
{ \
	do_error_trap(regs, error_code, str, trapnr, signr, sicode, addr); \
}

/* Simple exceptions with no handler logic beyond signal delivery: */
DO_ERROR(X86_TRAP_DE, SIGFPE, FPE_INTDIV, IP, "divide error", divide_error)
DO_ERROR(X86_TRAP_OF, SIGSEGV, 0, NULL, "overflow", overflow)
DO_ERROR(X86_TRAP_UD, SIGILL, ILL_ILLOPN, IP, "invalid opcode", invalid_op)
DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, 0, NULL, "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(X86_TRAP_TS, SIGSEGV, 0, NULL, "invalid TSS", invalid_TSS)
DO_ERROR(X86_TRAP_NP, SIGBUS, 0, NULL, "segment not present", segment_not_present)
DO_ERROR(X86_TRAP_SS, SIGBUS, 0, NULL, "stack segment", stack_segment)
#undef IP
231
/*
 * #AC handler.  Besides classic alignment checking, this vector also
 * reports split-lock detection (handlers defined elsewhere).
 */
dotraplinkage void do_alignment_check(struct pt_regs *regs, long error_code)
{
	char *str = "alignment check";

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
		return;

	local_irq_enable();

	/* #AC raised in kernel mode is handled by the split-lock code. */
	if (!user_mode(regs)) {
		handle_kernel_split_lock(regs, error_code);
		return;
	}

	/* Give user-mode split-lock handling a chance before signalling. */
	if (handle_user_split_lock(regs, error_code))
		return;

	/* Genuine user alignment fault: SIGBUS with BUS_ADRALN. */
	do_trap(X86_TRAP_AC, SIGBUS, "alignment check", regs,
		error_code, BUS_ADRALN, NULL);
}
254
255#ifdef CONFIG_VMAP_STACK
/*
 * Report a hit on the vmap-stack guard page and terminate the context.
 * Marked __noreturn: die() normally ends the task; panic() is the
 * backstop in case die() somehow returns.
 */
__visible void __noreturn handle_stack_overflow(const char *message,
						struct pt_regs *regs,
						unsigned long fault_address)
{
	printk(KERN_EMERG "BUG: stack guard page was hit at %p (stack is %p..%p)\n",
		 (void *)fault_address, current->stack,
		 (char *)current->stack + THREAD_SIZE - 1);
	die(message, regs, 0);

	/* Be absolutely certain we don't return. */
	panic(message);
}
268#endif
269
270#ifdef CONFIG_X86_64
271
/*
 * #DF handler.  A double fault is architecturally unrecoverable; the only
 * paths out are the espfix64 IRET rewrite below and the vmap-stack
 * overflow report — everything else ends in die().
 */
dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
{
	static const char str[] = "double fault";
	struct task_struct *tsk = current;
#ifdef CONFIG_VMAP_STACK
	unsigned long cr2;
#endif

#ifdef CONFIG_X86_ESPFIX64
	extern unsigned char native_irq_return_iret[];

	/*
	 * If IRET takes a non-IST fault on the espfix64 stack, the CPU
	 * promotes it to a double fault.  In that case, take advantage of
	 * the fact that we're not on the normal (TSS.sp0) stack right now:
	 * write a fake #GP(0) frame at TSS.sp0 and redirect our own saved
	 * IP/SP so that "returning" from here lands in the #GP handler
	 * with a stack set up exactly as if the #GP had happened directly.
	 */
	if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
		regs->cs == __KERNEL_CS &&
		regs->ip == (unsigned long)native_irq_return_iret)
	{
		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

		/*
		 * regs->sp points at the failing IRET frame; copy its five
		 * words (ip, cs, flags, sp, ss) into the fake frame and
		 * supply a zero error code, i.e. #GP(0).
		 */
		memmove(&gpregs->ip, (void *)regs->sp, 5*8);
		gpregs->orig_ax = 0;

		/*
		 * Retarget our own return: resume at the general_protection
		 * entry with SP pointing at the fake frame's error code,
		 * as the CPU itself would have left it.
		 */
		regs->ip = (unsigned long)general_protection;
		regs->sp = (unsigned long)&gpregs->orig_ax;

		return;
	}
#endif

	nmi_enter();
	notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);

	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_DF;

#ifdef CONFIG_VMAP_STACK
	/*
	 * With virtually-mapped stacks, running off the end of the stack
	 * hits the guard page; delivering the resulting #PF then faults
	 * again and escalates to #DF.  CR2 still holds the faulting
	 * address, so if it lies within a page below the bottom of the
	 * task stack, report it as a stack overflow rather than a plain
	 * double fault.  (NOTE(review): heuristic — assumes CR2 was not
	 * clobbered between the fault and this read.)
	 */
	cr2 = read_cr2();
	if ((unsigned long)task_stack_page(tsk) - 1 - cr2 < PAGE_SIZE)
		handle_stack_overflow("kernel stack overflow (double-fault)", regs, cr2);
#endif

#ifdef CONFIG_DOUBLEFAULT
	df_debug(regs, error_code);
#endif

	/*
	 * This is always a kernel trap and never fixable (and thus must
	 * never return).
	 */
	for (;;)
		die(str, regs, error_code);
}
389#endif
390
/*
 * #BR handler: a BOUND-range or Intel MPX bounds exception.  MPX-specific
 * causes are decoded from BNDSTATUS; anything else falls back to a plain
 * SIGSEGV via exit_trap.
 */
dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
{
	const struct mpx_bndcsr *bndcsr;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	if (notify_die(DIE_TRAP, "bounds", regs, error_code,
			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
		return;
	cond_local_irq_enable(regs);

	/* #BR from kernel mode is always fatal. */
	if (!user_mode(regs))
		die("bounds", regs, error_code);

	if (!cpu_feature_enabled(X86_FEATURE_MPX)) {
		/* No MPX: treat as a plain #BR (legacy BOUND instruction). */
		goto exit_trap;
	}

	/*
	 * BNDSTATUS tells us why the exception was raised; it lives in
	 * the xsave area, which may not have valid MPX state for this
	 * task — fall back to the generic path in that case.
	 */
	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
	if (!bndcsr)
		goto exit_trap;

	trace_bounds_exception_mpx(bndcsr);

	/*
	 * The low bits of bndstatus encode the error cause:
	 * 2 = bound directory fault, 1 = bounds violation, 0 = no MPX cause.
	 */
	switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
	case 2:	/* Bound directory has an invalid entry. */
		if (mpx_handle_bd_fault())
			goto exit_trap;
		break;
	case 1: /* Bound violation. */
	{
		struct task_struct *tsk = current;
		struct mpx_fault_info mpx;

		if (mpx_fault_info(&mpx, regs)) {
			/*
			 * We failed to decode the instruction or its
			 * operands; report a generic #BR instead.
			 */
			goto exit_trap;
		}

		/*
		 * Deliver SIGSEGV carrying the decoded address and bounds,
		 * unless the common no-signal path already resolved it.
		 */
		if (!do_trap_no_signal(tsk, X86_TRAP_BR, "bounds", regs,
				error_code))
			break;

		show_signal(tsk, SIGSEGV, "trap ", "bounds", regs, error_code);

		force_sig_bnderr(mpx.addr, mpx.lower, mpx.upper);
		break;
	}
	case 0: /* No MPX-specific cause recorded. */
		goto exit_trap;
	default:
		die("bounds", regs, error_code);
	}

	return;

exit_trap:
	/*
	 * This path out is for all the cases where we could not handle
	 * the exception in some MPX-specific way; deliver the signal the
	 * old-fashioned (pre-MPX) way.
	 */
	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, error_code, 0, NULL);
}
476
/*
 * #GP handler: try UMIP emulation and vm86 forwarding first, then
 * kernel fixups, and finally deliver SIGSEGV to a user task.
 */
dotraplinkage void
do_general_protection(struct pt_regs *regs, long error_code)
{
	const char *desc = "general protection fault";
	struct task_struct *tsk;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	cond_local_irq_enable(regs);

	/* A user #GP from a UMIP-blocked instruction may be emulated away. */
	if (static_cpu_has(X86_FEATURE_UMIP)) {
		if (user_mode(regs) && fixup_umip_exception(regs))
			return;
	}

	if (v8086_mode(regs)) {
		local_irq_enable();
		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
		return;
	}

	tsk = current;
	if (!user_mode(regs)) {
		/* Kernel mode: exception-table fixup, notifier chain, die. */
		if (fixup_exception(regs, X86_TRAP_GP))
			return;

		tsk->thread.error_code = error_code;
		tsk->thread.trap_nr = X86_TRAP_GP;
		if (notify_die(DIE_GPF, desc, regs, error_code,
				X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
			die(desc, regs, error_code);
		return;
	}

	/* User mode: record the fault details for signal delivery. */
	tsk->thread.error_code = error_code;
	tsk->thread.trap_nr = X86_TRAP_GP;

	/* A vDSO fixup may resolve the fault without a signal. */
	if (fixup_vdso_exception(regs, X86_TRAP_GP, error_code, 0))
		return;

	show_signal(tsk, SIGSEGV, "", desc, regs, error_code);

	force_sig(SIGSEGV, tsk);
}
NOKPROBE_SYMBOL(do_general_protection);
521
/*
 * #BP (int3) handler.  The ordering below is load-bearing: ftrace and
 * text-poking must run before any other state is touched.
 */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	/*
	 * ftrace must be first; everything else may cause a recursive
	 * crash while ftrace is rewriting call sites.
	 */
	if (unlikely(atomic_read(&modifying_ftrace_code)) &&
		ftrace_int3_handler(regs))
		return;
#endif
	/* int3 placed by live text patching is resolved here. */
	if (poke_int3_handler(regs))
		return;

	/*
	 * Entered from kernel mode: enter an NMI-like context so nothing
	 * below assumes a normal, schedulable process context.
	 */
	if (!user_mode(regs))
		nmi_enter();

#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
	if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
				SIGTRAP) == NOTIFY_STOP)
		goto exit;
#endif

#ifdef CONFIG_KPROBES
	if (kprobe_int3_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/* Ordinary breakpoint: deliver SIGTRAP to the task. */
	cond_local_irq_enable(regs);
	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, 0, NULL);
	cond_local_irq_disable(regs);

exit:
	if (!user_mode(regs))
		nmi_exit();
}
NOKPROBE_SYMBOL(do_int3);
569
570#ifdef CONFIG_X86_64
571
572
573
574
575
/*
 * Help an exception handler that entered on a special stack move the
 * saved pt_regs to the normal per-thread stack (just below
 * cpu_current_top_of_stack), so it can run like a normal exception.
 * Returns the (possibly new) location of the registers.
 */
asmlinkage __visible notrace struct pt_regs *sync_regs(struct pt_regs *eregs)
{
	struct pt_regs *regs = (struct pt_regs *)this_cpu_read(cpu_current_top_of_stack) - 1;
	if (regs != eregs)
		*regs = *eregs;
	return regs;
}
NOKPROBE_SYMBOL(sync_regs);
584
585#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Find a safe, non-IST stack for a #VC exception and copy the saved
 * registers there, so the handler can leave the IST stack and run
 * nested exceptions safely.  Returns the new pt_regs location.
 */
asmlinkage __visible noinstr struct pt_regs *vc_switch_off_ist(struct pt_regs *regs)
{
	unsigned long sp, *stack;
	struct stack_info info;
	struct pt_regs *regs_ret;

	/*
	 * In the early SYSCALL entry window RSP still holds a user-space
	 * value and cannot be trusted; use the task stack top instead.
	 */
	if (regs->ip >= (unsigned long)entry_SYSCALL_64 &&
	    regs->ip < (unsigned long)entry_SYSCALL_64_safe_stack) {
		sp = this_cpu_read(cpu_current_top_of_stack);
		goto sync;
	}

	/*
	 * From here on, the interrupted RSP is trusted.  If the #VC hit
	 * an entry stack, an unknown stack, or another exception stack,
	 * switch to the dedicated VC2 fallback stack instead of growing
	 * in place.
	 */
	sp = regs->sp;
	stack = (unsigned long *)sp;

	if (!get_stack_info_noinstr(stack, current, &info) || info.type == STACK_TYPE_ENTRY ||
	    info.type >= STACK_TYPE_EXCEPTION_LAST)
		sp = __this_cpu_ist_top_va(VC2);

sync:
	/*
	 * Align the chosen stack and reserve room for a pt_regs copy of
	 * the interrupted context.
	 */
	sp = ALIGN_DOWN(sp, 8) - sizeof(*regs_ret);

	regs_ret = (struct pt_regs *)sp;
	*regs_ret = *regs;

	return regs_ret;
}
627#endif
628
/* Layout used by the bad-IRET fixup: entry return address + saved regs. */
struct bad_iret_stack {
	void *error_entry_ret;	/* return address pushed by the entry code */
	struct pt_regs regs;	/* saved register state of the faulting context */
};
633
/*
 * Called from the entry code early in handling a fault caused by a bad
 * IRET to user mode.  Moves our stack frame to where it would be had we
 * entered directly on the entry stack, so the fault can be handled as
 * if it came from the IRET target.
 */
asmlinkage __visible notrace
struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
{
	/* Destination: just below the entry stack top (TSS.sp0). */
	struct bad_iret_stack *new_stack =
		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;

	/* Copy the IRET target frame: 5 words (ip, cs, flags, sp, ss). */
	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);

	/* Copy the rest of the frame (everything up to regs.ip). */
	memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));

	/* A failed IRET can only have been a return to user mode. */
	BUG_ON(!user_mode(&new_stack->regs));
	return new_stack;
}
NOKPROBE_SYMBOL(fixup_bad_iret);
658#endif
659
/*
 * Detect a single-step trap taken inside the SYSENTER entry code, where
 * TF-driven stepping is expected and should be suppressed.  The matched
 * address range depends on the entry flavor built into this kernel.
 */
static bool is_sysenter_singlestep(struct pt_regs *regs)
{
	/*
	 * No attempt at precision here: if the IP is anywhere within the
	 * single-steppable SYSENTER region, treat the trap as a useless
	 * entry-code step.
	 */
#ifdef CONFIG_X86_32
	return (regs->ip - (unsigned long)__begin_SYSENTER_singlestep_region) <
		(unsigned long)__end_SYSENTER_singlestep_region -
		(unsigned long)__begin_SYSENTER_singlestep_region;
#elif defined(CONFIG_IA32_EMULATION)
	return (regs->ip - (unsigned long)entry_SYSENTER_compat) <
		(unsigned long)__end_entry_SYSENTER_compat -
		(unsigned long)entry_SYSENTER_compat;
#else
	/* 64-bit without compat entry: no SYSENTER region exists. */
	return false;
#endif
}
682
/*
 * Read DR6 and reset it for the next #DB.  The CPU does not clear DR6
 * itself, so the handler must do it to avoid conflating causes across
 * exceptions.  Writing DR6_RESERVED (the architecturally reserved-set
 * bits) and XORing them out of the read value yields a clean cause mask
 * for the caller.
 */
static __always_inline unsigned long debug_read_clear_dr6(void)
{
	unsigned long dr6;

	get_debugreg(dr6, 6);
	set_debugreg(DR6_RESERVED, 6);
	dr6 ^= DR6_RESERVED; /* Flip to a positive-logic cause mask. */

	return dr6;
}
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
/*
 * #DB handler: hardware breakpoints, single-step, user ICEBP and
 * bus-lock events all arrive here.  dr6 has already been read and
 * cleared by debug_read_clear_dr6().
 */
dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
{
	struct task_struct *tsk = current;
	unsigned long dr6 = debug_read_clear_dr6(), dr7 = 0;
	int user_icebp = 0;
	int si_code;

	/*
	 * For kernel-mode #DB, disable further breakpoints while we run
	 * so a breakpoint placed inside this path cannot recurse; dr7 is
	 * restored on the way out.
	 */
	if (!user_mode(regs))
		dr7 = local_db_save();

	nmi_enter();

	/*
	 * The processor cleared BTF on delivering the exception, so
	 * don't record that we still need it set.
	 */
	clear_tsk_thread_flag(tsk, TIF_BLOCKSTEP);

	/*
	 * A single-step trap inside the SYSENTER entry code is expected
	 * noise: drop DR_STEP and bail if nothing else is pending.
	 */
	if (unlikely(!user_mode(regs) && (dr6 & DR_STEP) &&
			is_sysenter_singlestep(regs))) {
		dr6 &= ~DR_STEP;
		if (!dr6)
			goto exit;
	}

	/*
	 * If DR6 gives no reason for a user-mode #DB, it must have been
	 * a user-issued ICEBP (int $1) instruction.
	 */
	if (!dr6 && user_mode(regs))
		user_icebp = 1;

	/* Store the virtualized DR6 value for ptrace/signal consumers. */
	tsk->thread.debugreg6 = dr6;

#ifdef CONFIG_KPROBES
	if (kprobe_debug_handler(regs))
		goto exit;
#endif

	if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
			SIGTRAP) == NOTIFY_STOP)
		goto exit;

	/*
	 * Let others (e.g. NMI) know that the debug stack is in use while
	 * we may sleep or take further traps below.
	 */
	debug_stack_usage_inc();

	/* It's safe to allow irq's after DR6 has been saved. */
	cond_local_irq_enable(regs);

	if (v8086_mode(regs)) {
		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
				X86_TRAP_DB);
		cond_local_irq_disable(regs);
		debug_stack_usage_dec();
		goto exit;
	}

	/* Bus-lock detection shares the #DB vector. */
	if (dr6 & DR_BUS_LOCK)
		handle_bus_lock(regs);

	if (WARN_ON_ONCE((dr6 & DR_STEP) && !user_mode(regs))) {
		/*
		 * A kernel-mode single step should have been caught by the
		 * SYSENTER check above; recover by converting it into a
		 * pending user single-step and clearing TF.
		 */
		tsk->thread.debugreg6 &= ~DR_STEP;
		set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
		regs->flags &= ~X86_EFLAGS_TF;
	}
	si_code = get_si_code(tsk->thread.debugreg6);
	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
		send_sigtrap(tsk, regs, error_code, si_code);
	cond_local_irq_disable(regs);
	debug_stack_usage_dec();

exit:
	nmi_exit();

	/* Re-enable breakpoints if we disabled them on entry. */
	if (!user_mode(regs))
		local_db_restore(dr7);
}
NOKPROBE_SYMBOL(do_debug);
844
845
846
847
848
849
/*
 * Common body for #MF (x87) and #XF (SIMD) exceptions: fix up or die in
 * kernel mode, otherwise decode the FPU exception and deliver SIGFPE.
 */
static void math_error(struct pt_regs *regs, int error_code, int trapnr)
{
	struct task_struct *task = current;
	struct fpu *fpu = &task->thread.fpu;
	int si_code;
	char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
						"simd exception";

	cond_local_irq_enable(regs);

	if (!user_mode(regs)) {
		/* Kernel-mode FP exception: fixup, notifier chain, or die. */
		if (fixup_exception(regs, trapnr))
			return;

		task->thread.error_code = error_code;
		task->thread.trap_nr = trapnr;

		if (notify_die(DIE_TRAP, str, regs, error_code,
					trapnr, SIGFPE) != NOTIFY_STOP)
			die(str, regs, error_code);
		return;
	}

	/*
	 * Synchronize the FPU register state to memory so the exception
	 * cause can be inspected below.
	 */
	fpu_sync_fpstate(fpu);

	task->thread.trap_nr = trapnr;
	task->thread.error_code = error_code;

	si_code = fpu__exception_code(fpu, trapnr);

	/* A zero si_code means no signal should be sent (e.g. a retry). */
	if (!si_code)
		return;

	/* The vDSO may handle the exception without a signal. */
	if (fixup_vdso_exception(regs, trapnr, 0, 0))
		return;

	force_sig_fault(SIGFPE, si_code,
			(void __user *)uprobe_get_trap_addr(regs), task);
}
893
/* #MF: x87 floating-point exception entry point. */
dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_MF);
}
899
/* #XF: SIMD floating-point exception entry point. */
dotraplinkage void
do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	math_error(regs, error_code, X86_TRAP_XF);
}
906
/*
 * Spurious interrupt vector: nothing to handle, just restore the
 * interrupt state of the trapped context.  (NOTE(review): historically
 * raised by old hardware quirks — no action needed beyond this.)
 */
dotraplinkage void
do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
{
	cond_local_irq_enable(regs);
}
912
/*
 * Handle a #NM caused by a first touch of an XFD-guarded (dynamically
 * enabled) XSAVE feature.  Reads and clears MSR_IA32_XFD_ERR, tries to
 * enable the feature for current, and signals the task on failure.
 * Returns true if the #NM was an XFD event (handled here), false to let
 * the caller continue with normal #NM processing.
 */
static bool handle_xfd_event(struct pt_regs *regs)
{
	struct task_struct *task = current;
	u64 xfd_err;
	int err;

	if (!IS_ENABLED(CONFIG_X86_64) || !cpu_feature_enabled(X86_FEATURE_XFD))
		return false;

	rdmsrl(MSR_IA32_XFD_ERR, xfd_err);
	if (!xfd_err)
		return false;

	/* Clear the error MSR so a later #NM is not misattributed. */
	wrmsrl(MSR_IA32_XFD_ERR, 0);

	/* XFD events must come from user mode; warn and bail otherwise. */
	if (WARN_ON(!user_mode(regs)))
		return false;

	/* Feature enabling may sleep/allocate; interrupts must be on. */
	local_irq_enable();

	err = xfd_enable_feature(xfd_err);

	switch (err) {
	case -EPERM:
		/* Feature use not permitted: illegal-instruction signal. */
		force_sig_fault(SIGILL, ILL_ILLOPC,
				(void __user *)uprobe_get_trap_addr(regs), task);
		break;
	case -EFAULT:
		force_sig(SIGSEGV, task);
		break;
	}

	local_irq_disable();
	return true;
}
949
/*
 * #NM handler: either an XFD first-touch event, math emulation on
 * FPU-less hardware, or an unexpected CR0.TS condition.
 */
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	unsigned long cr0;

	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");

	/* First touch of a dynamically-enabled XSAVE feature? */
	if (handle_xfd_event(regs))
		return;

#ifdef CONFIG_MATH_EMULATION
	/* No FPU and CR0.EM set: emulate the instruction in software. */
	if (!boot_cpu_has(X86_FEATURE_FPU) && (read_cr0() & X86_CR0_EM)) {
		struct math_emu_info info = { };

		cond_local_irq_enable(regs);

		info.regs = regs;
		math_emulate(&info);
		return;
	}
#endif

	/* #NM with eager FPU handling should not happen; diagnose CR0.TS. */
	cr0 = read_cr0();
	if (WARN(cr0 & X86_CR0_TS, "CR0.TS was set")) {
		/* Try to fix it up and carry on. */
		write_cr0(cr0 & ~X86_CR0_TS);
	} else {
		/*
		 * Something went terribly wrong; kill the task rather than
		 * risk a never-ending loop of #NM faults.
		 */
		die("unexpected #NM exception", regs, error_code);
	}
}
NOKPROBE_SYMBOL(do_device_not_available);
987
988#ifdef CONFIG_X86_32
/*
 * 32-bit only: a fault raised by a failing IRET; deliver SIGILL with
 * ILL_BADSTK unless the notifier chain consumed it.
 */
dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
{
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
	local_irq_enable();

	if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
			X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) {
		do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
			ILL_BADSTK, (void __user *)NULL);
	}
}
1000#endif
1001
/* Boot-time setup of exception handling; ordering below is required. */
void __init trap_init(void)
{
	/* Init cpu_entry_area before IST entries are set up. */
	setup_cpu_entry_areas();

	/* Init #VC handling state before the IDT is installed. */
	sev_es_init_vc_handling();

	idt_setup_traps();

	/*
	 * Point the IDT descriptor at a read-only alias in the cpu entry
	 * area, so "sidt" does not leak the kernel's real IDT address and
	 * the table itself is protected from stray writes.
	 */
	cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
		PAGE_KERNEL_RO);
	idt_descr.address = CPU_ENTRY_AREA_RO_IDT;

	/* Per-CPU state must be initialized before the IST traps below. */
	cpu_init();

	idt_setup_ist_traps();

	idt_setup_debugidt_traps();
}
1030