/*
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * entry_32.S contains the system-call and fault low-level handling routines
 * for 32-bit x86, as well as the interrupt stubs and all paths that can end
 * in a task switch.
 *
 * Stack layout while in 'syscall_exit' (i.e. struct pt_regs, lowest address
 * first): %ebx, %ecx, %edx, %esi, %edi, %ebp, %eax, %ds, %es, %fs,
 * %gs (a zero placeholder under CONFIG_X86_32_LAZY_GS), orig_eax,
 * %eip, %cs, %eflags, %oldesp, %oldss.
 */
#include <linux/linkage.h>
#include <linux/err.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page_types.h>
#include <asm/percpu.h>
#include <asm/dwarf2.h>
#include <asm/processor-flags.h>
#include <asm/ftrace.h>
#include <asm/irq_vectors.h>
#include <asm/cpufeature.h>
#include <asm/alternative-asm.h>
#include <asm/asm.h>
#include <asm/smap.h>

#include <linux/elf-em.h>
#define AUDIT_ARCH_I386		(EM_386|__AUDIT_ARCH_LE)
#define __AUDIT_ARCH_LE		0x40000000

#ifndef CONFIG_AUDITSYSCALL
#define sysenter_audit		syscall_trace_entry
#define sysexit_audit		syscall_exit_work
#endif

	.section .entry.text, "ax"

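/*
 * A note on the macros used below: DISABLE_INTERRUPTS, ENABLE_INTERRUPTS,
 * INTERRUPT_RETURN and ENABLE_INTERRUPTS_SYSEXIT wrap cli/sti/iret/sysexit
 * so that CONFIG_PARAVIRT can substitute hypervisor-friendly versions; the
 * CLBR_* argument tells the paravirt patcher which registers the replacement
 * code is allowed to clobber.  preempt_stop() disables interrupts (and
 * records that fact for irq-flags tracing) on the exception-return path when
 * kernel preemption is configured; without CONFIG_PREEMPT it is a no-op and
 * resume_kernel simply aliases restore_all.
 */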
#ifdef CONFIG_PREEMPT
#define preempt_stop(clobbers)	DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
#else
#define preempt_stop(clobbers)
#define resume_kernel		restore_all
#endif

.macro TRACE_IRQS_IRET
#ifdef CONFIG_TRACE_IRQFLAGS
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off?
	jz 1f
	TRACE_IRQS_ON
1:
#endif
.endm

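/*
 * User %gs handling.  %gs is used by user space for TLS; the kernel only
 * needs it for the stack-protector canary segment.  When the kernel does not
 * use %gs at all (CONFIG_X86_32_LAZY_GS), the entry code neither saves nor
 * restores the user value: PUSH_GS merely reserves the pt_regs slot by
 * pushing 0 and the remaining helpers are no-ops, the real switch being done
 * lazily elsewhere.  Otherwise %gs is saved here and reloaded on exit, with
 * exception-table fixups so that a bad user value is replaced by 0 instead
 * of faulting fatally.
 */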
#ifdef CONFIG_X86_32_LAZY_GS

.macro PUSH_GS
	pushl_cfi $0
.endm
.macro POP_GS pop=0
	addl $(4 + \pop), %esp
	CFI_ADJUST_CFA_OFFSET -(4 + \pop)
.endm
.macro POP_GS_EX
.endm

.macro PTGS_TO_GS
.endm
.macro PTGS_TO_GS_EX
.endm
.macro GS_TO_REG reg
.endm
.macro REG_TO_PTGS reg
.endm
.macro SET_KERNEL_GS reg
.endm

#else

.macro PUSH_GS
	pushl_cfi %gs
.endm

.macro POP_GS pop=0
98:	popl_cfi %gs
	.if \pop <> 0
	add $\pop, %esp
	CFI_ADJUST_CFA_OFFSET -\pop
	.endif
.endm
.macro POP_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, (%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro PTGS_TO_GS
98:	mov PT_GS(%esp), %gs
.endm
.macro PTGS_TO_GS_EX
.pushsection .fixup, "ax"
99:	movl $0, PT_GS(%esp)
	jmp 98b
.popsection
	_ASM_EXTABLE(98b,99b)
.endm

.macro GS_TO_REG reg
	movl %gs, \reg
.endm
.macro REG_TO_PTGS reg
	movl \reg, PT_GS(%esp)
.endm
.macro SET_KERNEL_GS reg
	movl $(__KERNEL_STACK_CANARY), \reg
	movl \reg, %gs
.endm

#endif

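/*
 * SAVE_ALL builds struct pt_regs on the kernel stack: the segment registers
 * first (%gs via PUSH_GS, then %fs, %es, %ds) and then the general purpose
 * registers down to %ebx, after which it loads __USER_DS into %ds/%es,
 * __KERNEL_PERCPU into %fs and (via SET_KERNEL_GS) the stack-canary segment
 * into %gs.  RESTORE_REGS undoes all of this; the .fixup entries make a
 * faulting segment reload retry with a zeroed value instead of crashing.
 */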
.macro SAVE_ALL
	cld
	PUSH_GS
	pushl_cfi %fs
	pushl_cfi %es
	pushl_cfi %ds
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	movl $(__USER_DS), %edx
	movl %edx, %ds
	movl %edx, %es
	movl $(__KERNEL_PERCPU), %edx
	movl %edx, %fs
	SET_KERNEL_GS %edx
.endm

.macro RESTORE_INT_REGS
	popl_cfi %ebx
	CFI_RESTORE ebx
	popl_cfi %ecx
	CFI_RESTORE ecx
	popl_cfi %edx
	CFI_RESTORE edx
	popl_cfi %esi
	CFI_RESTORE esi
	popl_cfi %edi
	CFI_RESTORE edi
	popl_cfi %ebp
	CFI_RESTORE ebp
	popl_cfi %eax
	CFI_RESTORE eax
.endm

.macro RESTORE_REGS pop=0
	RESTORE_INT_REGS
1:	popl_cfi %ds
2:	popl_cfi %es
3:	popl_cfi %fs
	POP_GS \pop
.pushsection .fixup, "ax"
4:	movl $0, (%esp)
	jmp 1b
5:	movl $0, (%esp)
	jmp 2b
6:	movl $0, (%esp)
	jmp 3b
.popsection
	_ASM_EXTABLE(1b,4b)
	_ASM_EXTABLE(2b,5b)
	_ASM_EXTABLE(3b,6b)
	POP_GS_EX
.endm

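/*
 * The RING0_*_FRAME macros emit only DWARF CFI annotations, no code.
 * RING0_INT_FRAME describes the three words the CPU pushes for a same-ring
 * exception without an error code (EFLAGS, CS, EIP), RING0_EC_FRAME the four
 * words pushed when an error code is present, and RING0_PTREGS_FRAME the
 * stack once a full pt_regs has been saved.
 */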
.macro RING0_INT_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 3*4
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_EC_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 4*4
	CFI_OFFSET eip, -3*4
.endm

.macro RING0_PTREGS_FRAME
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
	CFI_OFFSET eip, PT_EIP-PT_OLDESP
	CFI_OFFSET eax, PT_EAX-PT_OLDESP
	CFI_OFFSET ebp, PT_EBP-PT_OLDESP
	CFI_OFFSET edi, PT_EDI-PT_OLDESP
	CFI_OFFSET esi, PT_ESI-PT_OLDESP
	CFI_OFFSET edx, PT_EDX-PT_OLDESP
	CFI_OFFSET ecx, PT_ECX-PT_OLDESP
	CFI_OFFSET ebx, PT_EBX-PT_OLDESP
.endm

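/*
 * ret_from_fork and ret_from_kernel_thread are the first code a new task
 * runs: the pushed %eax (the previous task, as left by __switch_to) is the
 * argument for schedule_tail().  The pushl $0x0202/popfl pair resets EFLAGS
 * to a known-good value (IF plus the always-set bit 1) before joining
 * syscall_exit.  A kernel thread additionally calls the function stored in
 * pt_regs->bx with the argument from pt_regs->bp, and only falls through to
 * the user-return path (with pt_regs->ax cleared) if that function returns.
 */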
ENTRY(ret_from_fork)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202
	popfl_cfi
	jmp syscall_exit
	CFI_ENDPROC
END(ret_from_fork)

ENTRY(ret_from_kernel_thread)
	CFI_STARTPROC
	pushl_cfi %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl_cfi %eax
	pushl_cfi $0x0202
	popfl_cfi
	movl PT_EBP(%esp),%eax
	call *PT_EBX(%esp)
	movl $0,PT_EAX(%esp)
	jmp syscall_exit
	CFI_ENDPROC
ENDPROC(ret_from_kernel_thread)

	.pushsection .kprobes.text, "ax"

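/*
 * Return from exceptions and interrupts: look at the saved CS (and, with
 * CONFIG_VM86, the VM bit of the saved EFLAGS) to tell whether we are going
 * back to kernel or to user/vm86 context.  Kernel returns go through
 * resume_kernel, which may preempt; user returns go through resume_userspace,
 * which handles the TIF work flags (signals, rescheduling, tracing).
 */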
	ALIGN
	RING0_PTREGS_FRAME
ret_from_exception:
	preempt_stop(CLBR_ANY)
ret_from_intr:
	GET_THREAD_INFO(%ebp)
#ifdef CONFIG_VM86
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS and CS
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
#else
	movl PT_CS(%esp), %eax
	andl $SEGMENT_RPL_MASK, %eax
#endif
	cmpl $USER_RPL, %eax
	jb resume_kernel		# not returning to v8086 or userspace

ENTRY(resume_userspace)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all
END(ret_from_exception)

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	DISABLE_INTERRUPTS(CLBR_ANY)
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_all
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
END(resume_kernel)
#endif
	CFI_ENDPROC

	.popsection

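/*
 * SYSENTER entry.  MSR_IA32_SYSENTER_ESP is set up so that %esp points into
 * the per-cpu TSS on entry; TSS_sysenter_sp0(%esp) reads tss->sp0, the top
 * of the current task's kernel stack, and switches to it.  SYSENTER itself
 * saves no user state, so the code below hand-builds an IRET-style frame:
 * the vDSO passes the user stack pointer in %ebp, EFLAGS gets IF forced on,
 * and the return address is taken from thread_info->sysenter_return.
 */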
ENTRY(ia32_sysenter_target)
	CFI_STARTPROC simple
	CFI_SIGNAL_FRAME
	CFI_DEF_CFA esp, 0
	CFI_REGISTER esp, ebp
	movl TSS_sysenter_sp0(%esp),%esp
sysenter_past_esp:
	/*
	 * Interrupts are disabled here, but we can't trace that until
	 * enough kernel state to call TRACE_IRQS_OFF has been set up -
	 * and we re-enable interrupts immediately below anyway.
	 */
	pushl_cfi $__USER_DS
	pushl_cfi %ebp
	CFI_REL_OFFSET esp, 0
	pushfl_cfi
	orl $X86_EFLAGS_IF, (%esp)
	pushl_cfi $__USER_CS
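	/*
	 * Push current_thread_info()->sysenter_return as the saved EIP.
	 * thread_info sits at the bottom of the THREAD_SIZE-aligned kernel
	 * stack, so the offset is taken relative to the current %esp: 4*4
	 * accounts for the four words pushed just above, and the +8 matches
	 * the gap copy_thread() leaves below the top of stack when setting
	 * sp0.
	 */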
	pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
	CFI_REL_OFFSET eip, 0

	pushl_cfi %eax
	SAVE_ALL
	ENABLE_INTERRUPTS(CLBR_NONE)

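	/*
	 * Load the potential sixth system-call argument from the user stack
	 * (the user %esp is still in %ebp).  Be careful about security: the
	 * pointer must lie entirely below the kernel mapping, and the access
	 * is done inside an ASM_STAC/ASM_CLAC window with an exception-table
	 * fixup to syscall_fault in case it faults.
	 */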
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
	ASM_STAC
1:	movl (%ebp),%ebp
	ASM_CLAC
	movl %ebp,PT_EBP(%esp)
	_ASM_EXTABLE(1b,syscall_fault)

	GET_THREAD_INFO(%ebp)

	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz sysenter_audit
sysenter_do_call:
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx
	jne sysexit_audit
sysenter_exit:
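	/*
	 * SYSEXIT resumes user mode with the new EIP taken from %edx and the
	 * new ESP from %ecx, so reload those from pt_regs.  %ebp (which held
	 * thread_info) is cleared so no kernel pointer leaks; the vDSO
	 * restores the user's own %ebp.  A fault while reloading %fs or %gs
	 * is caught by the fixup below, which loads 0 instead.
	 */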
	movl PT_EIP(%esp), %edx
	movl PT_OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	TRACE_IRQS_ON
1:	mov PT_FS(%esp), %fs
	PTGS_TO_GS
	ENABLE_INTERRUPTS_SYSEXIT

#ifdef CONFIG_AUDITSYSCALL
sysenter_audit:
	testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
	jnz syscall_trace_entry
	addl $4,%esp
	CFI_ADJUST_CFA_OFFSET -4
	movl %esi,4(%esp)		/* 5th arg: 4th syscall arg */
	movl %edx,(%esp)		/* 4th arg: 3rd syscall arg */
	/* %ecx already in %ecx:	   3rd arg: 2nd syscall arg */
	movl %ebx,%edx			/* 2nd arg: 1st syscall arg */
	/* %eax already in %eax:	   1st arg: syscall number */
	call __audit_syscall_entry
	pushl_cfi %ebx			/* restore %ebx */
	movl PT_EAX(%esp),%eax		/* reload syscall number */
	jmp sysenter_do_call

sysexit_audit:
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)
	movl %eax,%edx			/* second arg, syscall return value */
	cmpl $-MAX_ERRNO,%eax		/* is it an error ? */
	setbe %al			/* 1 if so, 0 if not */
	movzbl %al,%eax			/* zero-extend that */
	call __audit_syscall_exit
	DISABLE_INTERRUPTS(CLBR_ANY)
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
	jne syscall_exit_work
	movl PT_EAX(%esp),%eax		/* reload syscall return value */
	jmp sysenter_exit
#endif

	CFI_ENDPROC
.pushsection .fixup,"ax"
2:	movl $0,PT_FS(%esp)
	jmp 1b
.popsection
	_ASM_EXTABLE(1b,2b)
	PTGS_TO_GS_EX
ENDPROC(ia32_sysenter_target)

	.pushsection .kprobes.text, "ax"

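	# system_call is the int $0x80 entry point.  The CPU has pushed the
	# usual SS/ESP/EFLAGS/CS/EIP frame; the syscall number arrives in
	# %eax (saved as orig_eax) and the arguments in %ebx..%ebp, which
	# SAVE_ALL stores so that the saved registers double as the on-stack
	# (asmlinkage) arguments of the sys_* handlers.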
ENTRY(system_call)
	RING0_INT_FRAME			# can't unwind into user space anyway
	ASM_CLAC
	pushl_cfi %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation / emulation
	testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(NR_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,PT_EAX(%esp)		# store the return value
syscall_exit:
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	testl $_TIF_ALLWORK_MASK, %ecx	# current->work
	jne syscall_exit_work

restore_all:
	TRACE_IRQS_IRET
restore_all_notrace:
#ifdef CONFIG_X86_ESPFIX32
	movl PT_EFLAGS(%esp), %eax	# mix EFLAGS, SS and CS
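	/*
	 * Fold EFLAGS.VM, the TI bit of the saved SS and the RPL of the saved
	 * CS into one value: only a return to user mode (not vm86) on an
	 * LDT-based, i.e. potentially 16-bit, stack segment takes the ldt_ss
	 * path below.  Note the saved SS slot holds junk when returning to
	 * the kernel, which is why CS is part of the same test.
	 */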
	movb PT_OLDSS(%esp), %ah
	movb PT_CS(%esp), %al
	andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
	cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
	CFI_REMEMBER_STATE
	je ldt_ss			# returning to user-space with LDT SS
#endif
restore_nocheck:
	RESTORE_REGS 4			# skip orig_eax/error_code
irq_return:
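	/*
	 * The IRET itself can fault (bad segments or EIP in the restored
	 * frame); the exception table entry below sends such a fault to
	 * iret_exc, which reports it through do_iret_error with a zero
	 * error code.
	 */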
	INTERRUPT_RETURN
.section .fixup,"ax"
ENTRY(iret_exc)
	pushl $0
	pushl $do_iret_error
	jmp error_code
.previous
	_ASM_EXTABLE(irq_return,iret_exc)

#ifdef CONFIG_X86_ESPFIX32
	CFI_RESTORE_STATE
ldt_ss:
	larl PT_OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# allright, normal return

#ifdef CONFIG_PARAVIRT
	/*
	 * The kernel can't run on a non-flat stack if paravirt mode
	 * is active.  Rather than try to fixup the high bits of
	 * ESP, bypass this code entirely.  This may break DOSemu
	 * and/or Wine support in a paravirt VM, because the user
	 * wants a straightforward address space layout if possible.
	 */
	cmpl $0, pv_info+PARAVIRT_enabled
	jne restore_nocheck
#endif

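/*
 * Set up and switch to the ESPFIX stack.  An IRET to a 16-bit stack segment
 * restores only the low word of ESP, leaking the kernel's high bits and
 * leaving a bogus stack pointer.  The workaround: patch the base of the
 * per-cpu GDT_ENTRY_ESPFIX_SS descriptor so that the high word of the user
 * ESP is compensated for by the segment base, then switch SS:ESP to that
 * segment just before the final IRET.
 */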
#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
	mov %esp, %edx			/* load kernel esp */
	mov PT_OLDESP(%esp), %eax	/* load userspace esp */
	mov %dx, %ax			/* eax: new kernel esp */
	sub %eax, %edx			/* offset (low word is 0) */
	shr $16, %edx
	mov %dl, GDT_ESPFIX_SS + 4	/* bits 16..23 */
	mov %dh, GDT_ESPFIX_SS + 7	/* bits 24..31 */
	pushl_cfi $__ESPFIX_SS
	pushl_cfi %eax			/* new kernel esp */
	/*
	 * Disable interrupts, but do not irqtrace this section: we
	 * will soon execute iret and the tracer was already set to
	 * the irqstate after the IRET.
	 */
	DISABLE_INTERRUPTS(CLBR_EAX)
	lss (%esp), %esp		/* switch to espfix segment */
	CFI_ADJUST_CFA_OFFSET -8
	jmp restore_nocheck
#endif
	CFI_ENDPROC
ENDPROC(system_call)

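	# Slow path for returning to user space with work pending: reschedule
	# as long as TIF_NEED_RESCHED is set, then hand any remaining work
	# (signals, notify-resume callbacks) to do_notify_resume(), taking a
	# detour through save_v86_state() when coming back from vm86 mode.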
	ALIGN
	RING0_PTREGS_FRAME		# can't unwind into user space anyway
work_pending:
	testb $_TIF_NEED_RESCHED, %cl	# need_resched set ?
	jz work_notifysig
work_resched:
	call schedule
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_ANY)	# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	TRACE_IRQS_OFF
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
#ifdef CONFIG_VM86
	testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to vm86 mode
1:
#else
	movl %esp, %eax
#endif
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_NONE)
	movb PT_CS(%esp), %bl
	andb $SEGMENT_RPL_MASK, %bl
	cmpb $USER_RPL, %bl
	jb resume_kernel
	xorl %edx, %edx
	call do_notify_resume
	jmp resume_userspace

#ifdef CONFIG_VM86
	ALIGN
work_notifysig_v86:
	pushl_cfi %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl_cfi %ecx
	movl %eax, %esp
	jmp 1b
#endif
END(work_pending)

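	# Syscall tracing hooks: syscall_trace_entry runs the ptrace/audit/
	# tracepoint entry work and re-checks the (possibly rewritten) syscall
	# number before dispatching; syscall_exit_work runs the corresponding
	# exit-side hooks via syscall_trace_leave().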
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,PT_EAX(%esp)
	movl %esp, %eax
	call syscall_trace_enter
	/* What it returned is what we'll actually use.  */
	cmpl $(NR_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit
END(syscall_trace_entry)

	ALIGN
syscall_exit_work:
	testl $_TIF_WORK_SYSCALL_EXIT, %ecx
	jz work_pending
	TRACE_IRQS_ON
	ENABLE_INTERRUPTS(CLBR_ANY)	# could let syscall_trace_leave() call
					# schedule() instead
	movl %esp, %eax
	call syscall_trace_leave
	jmp resume_userspace
END(syscall_exit_work)
	CFI_ENDPROC

	RING0_INT_FRAME			# can't unwind into user space anyway
syscall_fault:
	ASM_CLAC
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_fault)

syscall_badsys:
	movl $-ENOSYS,PT_EAX(%esp)
	jmp resume_userspace
END(syscall_badsys)
	CFI_ENDPROC

	.popsection

.macro FIXUP_ESPFIX_STACK
/*
 * Switch back from the espfix stack to the normal zero-based kernel stack:
 * we cannot call C code while on the espfix stack, so read the high part of
 * the stack address back out of the espfix GDT descriptor and reload a
 * normal SS:ESP with the matching offset.
 */
#ifdef CONFIG_X86_ESPFIX32
	/* fixup the stack */
	mov GDT_ESPFIX_SS + 4, %al	/* bits 16..23 */
	mov GDT_ESPFIX_SS + 7, %ah	/* bits 24..31 */
	shl $16, %eax
	addl %esp, %eax			/* the adjusted stack pointer */
	pushl_cfi $__KERNEL_DS
	pushl_cfi %eax
	lss (%esp), %esp		/* switch to the normal stack */
	CFI_ADJUST_CFA_OFFSET -8
#endif
.endm
.macro UNWIND_ESPFIX_STACK
#ifdef CONFIG_X86_ESPFIX32
	movl %ss, %eax
	/* see if on espfix stack */
	cmpw $__ESPFIX_SS, %ax
	jne 27f
	movl $__KERNEL_DS, %eax
	movl %eax, %ds
	movl %eax, %es
	/* switch to normal stack */
	FIXUP_ESPFIX_STACK
27:
#endif
.endm
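
/*
 * Build the hardware interrupt stubs, plus the 'interrupt' pointer table in
 * .init.rodata, with assembler loops.  Seven stubs are packed into each
 * 32-byte block so that they share a cache line; every stub pushes
 * $(~vector + 0x80) -- a value kept in signed byte range -- and jumps to
 * common_interrupt.
 */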
.section .init.rodata,"a"
ENTRY(interrupt)
.section .entry.text, "ax"
	.p2align 5
	.p2align CONFIG_X86_L1_CACHE_SHIFT
ENTRY(irq_entries_start)
	RING0_INT_FRAME
vector=FIRST_EXTERNAL_VECTOR
.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
	.balign 32
  .rept 7
    .if vector < NR_VECTORS
      .if vector <> FIRST_EXTERNAL_VECTOR
	CFI_ADJUST_CFA_OFFSET -4
      .endif
1:	pushl_cfi $(~vector+0x80)
      .if ((vector-FIRST_EXTERNAL_VECTOR)%7) <> 6
	jmp 2f
      .endif
      .previous
	.long 1b
      .section .entry.text, "ax"
vector=vector+1
    .endif
  .endr
2:	jmp common_interrupt
.endr
END(irq_entries_start)

.previous
END(interrupt)
.previous

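/*
 * The CPU disables interrupts when it goes through an interrupt gate, so
 * irq-flags tracing has to record "off" here.  common_interrupt also removes
 * the 0x80 bias that kept the pushed vector in signed byte range before
 * handing the pt_regs pointer to do_IRQ().
 */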
	.p2align CONFIG_X86_L1_CACHE_SHIFT
common_interrupt:
	ASM_CLAC
	addl $-0x80,(%esp)		/* Adjust vector into the [-256,-1] range */
	SAVE_ALL
	TRACE_IRQS_OFF
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr
ENDPROC(common_interrupt)
	CFI_ENDPROC

	.pushsection .kprobes.text, "ax"
#define BUILD_INTERRUPT3(name, nr, fn)	\
ENTRY(name)				\
	RING0_INT_FRAME;		\
	ASM_CLAC;			\
	pushl_cfi $~(nr);		\
	SAVE_ALL;			\
	TRACE_IRQS_OFF			\
	movl %esp,%eax;			\
	call fn;			\
	jmp ret_from_intr;		\
	CFI_ENDPROC;			\
ENDPROC(name)

#ifdef CONFIG_TRACING
#define TRACE_BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
#else
#define TRACE_BUILD_INTERRUPT(name, nr)
#endif

#define BUILD_INTERRUPT(name, nr)		\
	BUILD_INTERRUPT3(name, nr, smp_##name);	\
	TRACE_BUILD_INTERRUPT(name, nr)

/* The include is where all of the SMP etc. interrupts come from */
#include <asm/entry_arch.h>

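/*
 * The exception stubs below all share one pattern: push a zero in place of
 * the error code when the CPU does not supply one (RING0_INT_FRAME entries),
 * push the address of the C handler, and jump to the common error_code path,
 * which saves the remaining registers and calls the handler with a pt_regs
 * pointer and the error code as arguments.
 */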
ENTRY(coprocessor_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_coprocessor_error
	jmp error_code
	CFI_ENDPROC
END(coprocessor_error)

ENTRY(simd_coprocessor_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
#ifdef CONFIG_X86_INVD_BUG
	/* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
661:	pushl_cfi $do_general_protection
662:
.section .altinstructions,"a"
	altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
.previous
.section .altinstr_replacement,"ax"
663:	pushl $do_simd_coprocessor_error
664:
.previous
#else
	pushl_cfi $do_simd_coprocessor_error
#endif
	jmp error_code
	CFI_ENDPROC
END(simd_coprocessor_error)

ENTRY(device_not_available)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $-1			# mark this as an int
	pushl_cfi $do_device_not_available
	jmp error_code
	CFI_ENDPROC
END(device_not_available)

#ifdef CONFIG_PARAVIRT
ENTRY(native_iret)
	iret
	_ASM_EXTABLE(native_iret, iret_exc)
END(native_iret)

ENTRY(native_irq_enable_sysexit)
	sti
	sysexit
END(native_irq_enable_sysexit)
#endif

ENTRY(overflow)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_overflow
	jmp error_code
	CFI_ENDPROC
END(overflow)

ENTRY(bounds)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_bounds
	jmp error_code
	CFI_ENDPROC
END(bounds)

ENTRY(invalid_op)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_invalid_op
	jmp error_code
	CFI_ENDPROC
END(invalid_op)

ENTRY(coprocessor_segment_overrun)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_coprocessor_segment_overrun
	jmp error_code
	CFI_ENDPROC
END(coprocessor_segment_overrun)

ENTRY(invalid_TSS)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_invalid_TSS
	jmp error_code
	CFI_ENDPROC
END(invalid_TSS)

ENTRY(segment_not_present)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_segment_not_present
	jmp error_code
	CFI_ENDPROC
END(segment_not_present)

ENTRY(stack_segment)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_stack_segment
	jmp error_code
	CFI_ENDPROC
END(stack_segment)

ENTRY(alignment_check)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_alignment_check
	jmp error_code
	CFI_ENDPROC
END(alignment_check)

ENTRY(divide_error)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_divide_error
	jmp error_code
	CFI_ENDPROC
END(divide_error)

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi machine_check_vector
	jmp error_code
	CFI_ENDPROC
END(machine_check)
#endif

ENTRY(spurious_interrupt_bug)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $0
	pushl_cfi $do_spurious_interrupt_bug
	jmp error_code
	CFI_ENDPROC
END(spurious_interrupt_bug)

	.popsection

#ifdef CONFIG_XEN

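/*
 * Xen entry points.  Xen does not enter with %esp set up the way the native
 * SYSENTER path expects, so xen_sysenter_target drops the five-word frame
 * Xen provides and joins sysenter_past_esp.  xen_hypervisor_callback is the
 * event-channel upcall; if the interrupted EIP lies inside Xen's IRET
 * critical region the frame is first repaired by xen_iret_crit_fixup before
 * xen_evtchn_do_upcall() runs.
 */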
ENTRY(xen_sysenter_target)
	RING0_INT_FRAME
	addl $5*4, %esp			/* remove xen-provided frame */
	CFI_ADJUST_CFA_OFFSET -5*4
	jmp sysenter_past_esp
	CFI_ENDPROC

ENTRY(xen_hypervisor_callback)
	CFI_STARTPROC
	pushl_cfi $-1			/* orig_ax = -1 => not a system call */
	SAVE_ALL
	TRACE_IRQS_OFF

	/*
	 * Check whether the event arrived inside the critical region of
	 * Xen's iret path (between xen_iret_start_crit and
	 * xen_iret_end_crit); if so, the partially torn-down frame must be
	 * repaired by xen_iret_crit_fixup before the upcall can run.
	 */
	movl PT_EIP(%esp),%eax
	cmpl $xen_iret_start_crit,%eax
	jb 1f
	cmpl $xen_iret_end_crit,%eax
	jae 1f

	jmp xen_iret_crit_fixup

ENTRY(xen_do_upcall)
1:	mov %esp, %eax
	call xen_evtchn_do_upcall
	jmp ret_from_intr
	CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

/*
 * Xen invokes this callback for faults it hits while doing work on the
 * guest's behalf, i.e. either (1) a fault while reloading DS, ES, FS or GS,
 * or (2) a fault while executing IRET.  For (1) the segment load is retried
 * with 0 on failure and the task is resumed through ret_from_exception;
 * for (2) the problem is handed to iret_exc.
 */
ENTRY(xen_failsafe_callback)
	CFI_STARTPROC
	pushl_cfi %eax
	movl $1,%eax
1:	mov 4(%esp),%ds
2:	mov 8(%esp),%es
3:	mov 12(%esp),%fs
4:	mov 16(%esp),%gs
	/* EAX == 0 => Category 1 (Bad segment)
	   EAX != 0 => Category 2 (Bad IRET) */
	testl %eax,%eax
	popl_cfi %eax
	lea 16(%esp),%esp
	CFI_ADJUST_CFA_OFFSET -16
	jz 5f
	jmp iret_exc
5:	pushl_cfi $-1			/* orig_ax = -1 => not a system call */
	SAVE_ALL
	jmp ret_from_exception
	CFI_ENDPROC

.section .fixup,"ax"
6:	xorl %eax,%eax
	movl %eax,4(%esp)
	jmp 1b
7:	xorl %eax,%eax
	movl %eax,8(%esp)
	jmp 2b
8:	xorl %eax,%eax
	movl %eax,12(%esp)
	jmp 3b
9:	xorl %eax,%eax
	movl %eax,16(%esp)
	jmp 4b
.previous
	_ASM_EXTABLE(1b,6b)
	_ASM_EXTABLE(2b,7b)
	_ASM_EXTABLE(3b,8b)
	_ASM_EXTABLE(4b,9b)
ENDPROC(xen_failsafe_callback)

BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		xen_evtchn_do_upcall)

#endif /* CONFIG_XEN */

#if IS_ENABLED(CONFIG_HYPERV)

BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		hyperv_vector_handler)

#endif /* CONFIG_HYPERV */

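/*
 * Function-tracer entry code.  With CONFIG_DYNAMIC_FTRACE the compiler's
 * mcount call sites are patched at run time: mcount itself is a bare "ret",
 * and patched sites call ftrace_caller, or ftrace_regs_caller when a handler
 * has asked for a full, modifiable pt_regs.  Without dynamic ftrace, mcount
 * checks the tracer hooks and calls them directly.
 */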
#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE

ENTRY(mcount)
	ret
END(mcount)

ENTRY(ftrace_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	pushl $0			/* Pass NULL as regs pointer */
	movl 4*4(%esp), %eax		/* return address into the traced function */
	movl 0x4(%ebp), %edx		/* the traced function's parent return address */
	movl function_trace_op, %ecx	/* ftrace_ops (3rd parameter) */
	subl $MCOUNT_INSN_SIZE, %eax	/* back up to the mcount call site */

.globl ftrace_call
ftrace_call:
	call ftrace_stub

	addl $4,%esp			/* skip NULL pointer */
	popl %edx
	popl %ecx
	popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	jmp ftrace_stub
#endif

.globl ftrace_stub
ftrace_stub:
	ret
END(ftrace_caller)

ENTRY(ftrace_regs_caller)
	pushf				/* push flags before compare (in cs location) */
	/*
	 * i386 does not save SS and ESP when coming from the kernel, so to
	 * hand the handler a usable, writable pt_regs the frame is built by
	 * hand: flags go into the CS slot above, the return address is
	 * copied into the ip slot, and all registers are pushed below.
	 */
	pushl 4(%esp)			/* save return ip into ip slot */

	pushl $0			/* Load 0 into orig_ax */
	pushl %gs
	pushl %fs
	pushl %es
	pushl %ds
	pushl %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	pushl %ecx
	pushl %ebx

	movl 13*4(%esp), %eax		/* Get the saved flags */
	movl %eax, 14*4(%esp)		/* Move saved flags into regs->flags location */

	movl $__KERNEL_CS,13*4(%esp)

	movl 12*4(%esp), %eax		/* Load ip (1st parameter) */
	subl $MCOUNT_INSN_SIZE, %eax	/* Adjust ip */
	movl 0x4(%ebp), %edx		/* Load parent ip (2nd parameter) */
	movl function_trace_op, %ecx	/* ftrace_ops (3rd parameter) */
	pushl %esp			/* Save pt_regs as 4th parameter */

GLOBAL(ftrace_regs_call)
	call ftrace_stub

	addl $4, %esp			/* Skip pt_regs */
	movl 14*4(%esp), %eax		/* Move flags back into cs */
	movl %eax, 13*4(%esp)		/* Needed to keep addl from modifying flags */
	movl 12*4(%esp), %eax		/* Get return ip from regs->ip */
	movl %eax, 14*4(%esp)		/* Put return ip back for ret */

	popl %ebx
	popl %ecx
	popl %edx
	popl %esi
	popl %edi
	popl %ebp
	popl %eax
	popl %ds
	popl %es
	popl %fs
	popl %gs
	addl $8, %esp			/* Skip orig_ax and ip */
	popf				/* Pop flags at end (no addl to corrupt flags) */
	jmp ftrace_ret

	popf
	jmp ftrace_stub
#else

ENTRY(mcount)
	cmpl $ftrace_stub, ftrace_trace_function
	jnz trace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	cmpl $ftrace_stub, ftrace_graph_return
	jnz ftrace_graph_caller

	cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
	jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
	ret

trace:
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %eax
	movl 0x4(%ebp), %edx
	subl $MCOUNT_INSN_SIZE, %eax

	call *ftrace_trace_function

	popl %edx
	popl %ecx
	popl %eax
	jmp ftrace_stub
END(mcount)
#endif
#endif

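/*
 * Graph tracer: ftrace_graph_caller hands prepare_ftrace_return() the
 * location of the traced function's return address (0x4(%ebp)), the call
 * site and the frame pointer; prepare_ftrace_return() can then replace the
 * return address with return_to_handler, which later asks
 * ftrace_return_to_handler() for the original address and jumps to it.
 */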
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	pushl %eax
	pushl %ecx
	pushl %edx
	movl 0xc(%esp), %edx
	lea 0x4(%ebp), %eax
	movl (%ebp), %ecx
	subl $MCOUNT_INSN_SIZE, %edx
	call prepare_ftrace_return
	popl %edx
	popl %ecx
	popl %eax
	ret
END(ftrace_graph_caller)

.globl return_to_handler
return_to_handler:
	pushl %eax
	pushl %edx
	movl %ebp, %eax
	call ftrace_return_to_handler
	movl %eax, %ecx
	popl %edx
	popl %eax
	jmp *%ecx
#endif

	.pushsection .kprobes.text, "ax"

#ifdef CONFIG_TRACING
ENTRY(trace_page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $trace_do_page_fault
	jmp error_code
	CFI_ENDPROC
END(trace_page_fault)
#endif

ENTRY(page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_page_fault
	ALIGN
error_code:
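	/*
	 * Common exception body.  On entry the stack holds the hardware
	 * frame, the error code (in the orig_eax slot) and, pushed last, the
	 * address of the C handler, which therefore sits in the %gs slot of
	 * pt_regs.  The code below saves the remaining registers, moves the
	 * handler address into %edi and the error code into %edx, marks
	 * orig_eax as -1 (no syscall to restart) and calls the handler with
	 * the pt_regs pointer in %eax.
	 */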
	pushl_cfi %fs
	pushl_cfi %es
	pushl_cfi %ds
	pushl_cfi %eax
	CFI_REL_OFFSET eax, 0
	pushl_cfi %ebp
	CFI_REL_OFFSET ebp, 0
	pushl_cfi %edi
	CFI_REL_OFFSET edi, 0
	pushl_cfi %esi
	CFI_REL_OFFSET esi, 0
	pushl_cfi %edx
	CFI_REL_OFFSET edx, 0
	pushl_cfi %ecx
	CFI_REL_OFFSET ecx, 0
	pushl_cfi %ebx
	CFI_REL_OFFSET ebx, 0
	cld
	movl $(__KERNEL_PERCPU), %ecx
	movl %ecx, %fs
	UNWIND_ESPFIX_STACK
	GS_TO_REG %ecx
	movl PT_GS(%esp), %edi		# get the function address
	movl PT_ORIG_EAX(%esp), %edx	# get the error code
	movl $-1, PT_ORIG_EAX(%esp)	# no syscall to restart
	REG_TO_PTGS %ecx
	SET_KERNEL_GS %ecx
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	TRACE_IRQS_OFF
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception
	CFI_ENDPROC
END(page_fault)

/*
 * Debug traps and NMIs can hit the one SYSENTER instruction that runs before
 * the real kernel stack has been loaded (the switch at sysenter_past_esp).
 * In that case FIX_STACK loads the proper task stack from tss->sp0 (adjusted
 * by \offset for the words the trap already pushed) and pushes a fresh
 * EFLAGS/__KERNEL_CS/sysenter_past_esp frame by hand, so the handler runs on
 * a sane stack and its return restarts the SYSENTER path past the switch.
 */
.macro FIX_STACK offset ok label
	cmpw $__KERNEL_CS, 4(%esp)
	jne \ok
\label:
	movl TSS_sysenter_sp0 + \offset(%esp), %esp
	CFI_DEF_CFA esp, 0
	CFI_UNDEFINED eip
	pushfl_cfi
	pushl_cfi $__KERNEL_CS
	pushl_cfi $sysenter_past_esp
	CFI_REL_OFFSET eip, 0
.endm

ENTRY(debug)
	RING0_INT_FRAME
	ASM_CLAC
	cmpl $ia32_sysenter_target,(%esp)
	jne debug_stack_correct
	FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
debug_stack_correct:
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	jmp ret_from_exception
	CFI_ENDPROC
END(debug)

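/*
 * NMI handling is delicate: an NMI can hit on the first instruction of the
 * SYSENTER path before the kernel stack is loaded, while the debug handler
 * is still repairing such a stack, or (with espfix) while we are running on
 * the 16-bit espfix stack.  The checks below detect those cases and fix the
 * stack up (FIX_STACK / nmi_espfix_stack) before building pt_regs and
 * calling do_nmi(); the exit goes through restore_all_notrace, skipping the
 * irq-flags tracing done on the normal return path.
 */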
ENTRY(nmi)
	RING0_INT_FRAME
	ASM_CLAC
#ifdef CONFIG_X86_ESPFIX32
	pushl_cfi %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl_cfi %eax
	je nmi_espfix_stack
#endif
	cmpl $ia32_sysenter_target,(%esp)
	je nmi_stack_fixup
	pushl_cfi %eax
	movl %esp,%eax
	/*
	 * Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl_cfi %eax
	jae nmi_stack_correct
	cmpl $ia32_sysenter_target,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	/* We have a RING0_INT_FRAME here */
	pushl_cfi %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all_notrace
	CFI_ENDPROC

nmi_stack_fixup:
	RING0_INT_FRAME
	FIX_STACK 12, nmi_stack_correct, 1
	jmp nmi_stack_correct

nmi_debug_stack_check:
	/* We have a RING0_INT_FRAME here */
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug,(%esp)
	jb nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	ja nmi_stack_correct
	FIX_STACK 24, nmi_stack_correct, 1
	jmp nmi_stack_correct

#ifdef CONFIG_X86_ESPFIX32
nmi_espfix_stack:
	/*
	 * We have a RING0_INT_FRAME here.
	 *
	 * Create the pointer to lss back.
	 */
	pushl_cfi %ss
	pushl_cfi %esp
	addl $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl_cfi 16(%esp)
	.endr
	pushl_cfi %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to espfix stack
	CFI_ADJUST_CFA_OFFSET -24
	jmp irq_return
#endif
	CFI_ENDPROC
END(nmi)

ENTRY(int3)
	RING0_INT_FRAME
	ASM_CLAC
	pushl_cfi $-1			# mark this as an int
	SAVE_ALL
	TRACE_IRQS_OFF
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	jmp ret_from_exception
	CFI_ENDPROC
END(int3)

ENTRY(general_protection)
	RING0_EC_FRAME
	pushl_cfi $do_general_protection
	jmp error_code
	CFI_ENDPROC
END(general_protection)

#ifdef CONFIG_KVM_GUEST
ENTRY(async_page_fault)
	RING0_EC_FRAME
	ASM_CLAC
	pushl_cfi $do_async_page_fault
	jmp error_code
	CFI_ENDPROC
END(async_page_fault)
#endif

/*
 * End of kprobes section
 */
	.popsection