linux/arch/x86/entry/entry_32.S
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 *  Copyright (C) 1991,1992  Linus Torvalds
   4 *
   5 * entry_32.S contains the system-call and low-level fault and trap handling routines.
   6 *
   7 * Stack layout while running C code:
   8 *      ptrace needs to have all registers on the stack.
   9 *      If the order here is changed, it needs to be
  10 *      updated in fork.c:copy_process(), signal.c:do_signal(),
  11 *      ptrace.c and ptrace.h
  12 *
  13 *       0(%esp) - %ebx
  14 *       4(%esp) - %ecx
  15 *       8(%esp) - %edx
  16 *       C(%esp) - %esi
  17 *      10(%esp) - %edi
  18 *      14(%esp) - %ebp
  19 *      18(%esp) - %eax
  20 *      1C(%esp) - %ds
  21 *      20(%esp) - %es
  22 *      24(%esp) - %fs
  23 *      28(%esp) - unused -- was %gs on old stackprotector kernels
  24 *      2C(%esp) - orig_eax
  25 *      30(%esp) - %eip
  26 *      34(%esp) - %cs
  27 *      38(%esp) - %eflags
  28 *      3C(%esp) - %oldesp
  29 *      40(%esp) - %oldss
  30 */
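/*
 * For reference, the layout above is what the 32-bit struct pt_regs describes
 * in C; a sketch following arch/x86/include/asm/ptrace.h (each 16-bit segment
 * field is padded to a full dword by its __*h companion, and "gs" is the
 * unused slot noted above):
 *
 *	struct pt_regs {
 *		unsigned long bx, cx, dx, si, di, bp, ax;
 *		unsigned short ds, __dsh;
 *		unsigned short es, __esh;
 *		unsigned short fs, __fsh;
 *		unsigned short gs, __gsh;
 *		unsigned long orig_ax;
 *		unsigned long ip;
 *		unsigned short cs, __csh;
 *		unsigned long flags;
 *		unsigned long sp;
 *		unsigned short ss, __ssh;
 *	};
 */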
  31
  32#include <linux/linkage.h>
  33#include <linux/err.h>
  34#include <asm/thread_info.h>
  35#include <asm/irqflags.h>
  36#include <asm/errno.h>
  37#include <asm/segment.h>
  38#include <asm/smp.h>
  39#include <asm/percpu.h>
  40#include <asm/processor-flags.h>
  41#include <asm/irq_vectors.h>
  42#include <asm/cpufeatures.h>
  43#include <asm/alternative.h>
  44#include <asm/asm.h>
  45#include <asm/smap.h>
  46#include <asm/frame.h>
  47#include <asm/trapnr.h>
  48#include <asm/nospec-branch.h>
  49
  50#include "calling.h"
  51
  52        .section .entry.text, "ax"
  53
  54#define PTI_SWITCH_MASK         (1 << PAGE_SHIFT)
  55
  56/* Unconditionally switch to user cr3 */
  57.macro SWITCH_TO_USER_CR3 scratch_reg:req
  58        ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  59
  60        movl    %cr3, \scratch_reg
  61        orl     $PTI_SWITCH_MASK, \scratch_reg
  62        movl    \scratch_reg, %cr3
  63.Lend_\@:
  64.endm
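/*
 * With PTI the kernel and user page-table roots live in two adjacent pages,
 * so switching address spaces here is just toggling one bit of CR3.  A
 * minimal C model of the CR3 switches in this file, assuming PAGE_SHIFT == 12
 * and the user PGD sitting one page above the kernel PGD:
 *
 *	#define PTI_SWITCH_MASK	(1UL << 12)
 *
 *	static inline unsigned long pti_user_cr3(unsigned long kernel_cr3)
 *	{
 *		return kernel_cr3 | PTI_SWITCH_MASK;	// select the user PGD
 *	}
 *
 *	static inline unsigned long pti_kernel_cr3(unsigned long any_cr3)
 *	{
 *		return any_cr3 & ~PTI_SWITCH_MASK;	// select the kernel PGD
 *	}
 */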
  65
  66.macro BUG_IF_WRONG_CR3 no_user_check=0
  67#ifdef CONFIG_DEBUG_ENTRY
  68        ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  69        .if \no_user_check == 0
  70        /* coming from usermode? */
  71        testl   $USER_SEGMENT_RPL_MASK, PT_CS(%esp)
  72        jz      .Lend_\@
  73        .endif
  74        /* On user-cr3? */
  75        movl    %cr3, %eax
  76        testl   $PTI_SWITCH_MASK, %eax
  77        jnz     .Lend_\@
  78        /* From userspace with kernel cr3 - BUG */
  79        ud2
  80.Lend_\@:
  81#endif
  82.endm
  83
  84/*
  85 * Switch to kernel cr3 if not already loaded and return current cr3 in
  86 * \scratch_reg
  87 */
  88.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
  89        ALTERNATIVE "jmp .Lend_\@", "", X86_FEATURE_PTI
  90        movl    %cr3, \scratch_reg
  91        /* Test if we are already on kernel CR3 */
  92        testl   $PTI_SWITCH_MASK, \scratch_reg
  93        jz      .Lend_\@
  94        andl    $(~PTI_SWITCH_MASK), \scratch_reg
  95        movl    \scratch_reg, %cr3
  96        /* Return original CR3 in \scratch_reg */
  97        orl     $PTI_SWITCH_MASK, \scratch_reg
  98.Lend_\@:
  99.endm
 100
 101#define CS_FROM_ENTRY_STACK     (1 << 31)
 102#define CS_FROM_USER_CR3        (1 << 30)
 103#define CS_FROM_KERNEL          (1 << 29)
 104#define CS_FROM_ESPFIX          (1 << 28)
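/*
 * The saved CS is a 16-bit selector stored in a 32-bit slot, so the spare
 * high bits can carry the entry-state flags above.  FIXUP_FRAME clears them
 * on entry and IRET_FRAME strips them again when it rebuilds the real
 * hardware frame.  Sketch, with regs_cs being the full dword at PT_CS:
 *
 *	static inline unsigned short cs_selector(unsigned long regs_cs)
 *	{
 *		return regs_cs & 0xffff;		// what IRET will load
 *	}
 *
 *	static inline int cs_entered_from_kernel(unsigned long regs_cs)
 *	{
 *		return !!(regs_cs & CS_FROM_KERNEL);	// flag, not a selector bit
 *	}
 */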
 105
 106.macro FIXUP_FRAME
 107        /*
 108         * The high bits of the CS dword (__csh) are used for CS_FROM_*.
 109         * Clear them in case hardware didn't do this for us.
 110         */
 111        andl    $0x0000ffff, 4*4(%esp)
 112
 113#ifdef CONFIG_VM86
 114        testl   $X86_EFLAGS_VM, 5*4(%esp)
 115        jnz     .Lfrom_usermode_no_fixup_\@
 116#endif
 117        testl   $USER_SEGMENT_RPL_MASK, 4*4(%esp)
 118        jnz     .Lfrom_usermode_no_fixup_\@
 119
 120        orl     $CS_FROM_KERNEL, 4*4(%esp)
 121
 122        /*
  123         * When we're here from kernel mode, the (exception) stack looks like:
 124         *
 125         *  6*4(%esp) - <previous context>
 126         *  5*4(%esp) - flags
 127         *  4*4(%esp) - cs
 128         *  3*4(%esp) - ip
 129         *  2*4(%esp) - orig_eax
 130         *  1*4(%esp) - gs / function
 131         *  0*4(%esp) - fs
 132         *
  133         * Let's build a 5-entry IRET frame after that, such that struct pt_regs
 134         * is complete and in particular regs->sp is correct. This gives us
 135         * the original 6 entries as gap:
 136         *
 137         * 14*4(%esp) - <previous context>
 138         * 13*4(%esp) - gap / flags
 139         * 12*4(%esp) - gap / cs
 140         * 11*4(%esp) - gap / ip
 141         * 10*4(%esp) - gap / orig_eax
 142         *  9*4(%esp) - gap / gs / function
 143         *  8*4(%esp) - gap / fs
 144         *  7*4(%esp) - ss
 145         *  6*4(%esp) - sp
 146         *  5*4(%esp) - flags
 147         *  4*4(%esp) - cs
 148         *  3*4(%esp) - ip
 149         *  2*4(%esp) - orig_eax
 150         *  1*4(%esp) - gs / function
 151         *  0*4(%esp) - fs
 152         */
 153
 154        pushl   %ss             # ss
 155        pushl   %esp            # sp (points at ss)
 156        addl    $7*4, (%esp)    # point sp back at the previous context
 157        pushl   7*4(%esp)       # flags
 158        pushl   7*4(%esp)       # cs
 159        pushl   7*4(%esp)       # ip
 160        pushl   7*4(%esp)       # orig_eax
 161        pushl   7*4(%esp)       # gs / function
 162        pushl   7*4(%esp)       # fs
 163.Lfrom_usermode_no_fixup_\@:
 164.endm
 165
 166.macro IRET_FRAME
 167        /*
 168         * We're called with %ds, %es, %fs, and %gs from the interrupted
 169         * frame, so we shouldn't use them.  Also, we may be in ESPFIX
 170         * mode and therefore have a nonzero SS base and an offset ESP,
 171         * so any attempt to access the stack needs to use SS.  (except for
 172         * accesses through %esp, which automatically use SS.)
 173         */
 174        testl $CS_FROM_KERNEL, 1*4(%esp)
 175        jz .Lfinished_frame_\@
 176
 177        /*
 178         * Reconstruct the 3 entry IRET frame right after the (modified)
 179         * regs->sp without lowering %esp in between, such that an NMI in the
 180         * middle doesn't scribble our stack.
 181         */
 182        pushl   %eax
 183        pushl   %ecx
 184        movl    5*4(%esp), %eax         # (modified) regs->sp
 185
 186        movl    4*4(%esp), %ecx         # flags
 187        movl    %ecx, %ss:-1*4(%eax)
 188
 189        movl    3*4(%esp), %ecx         # cs
 190        andl    $0x0000ffff, %ecx
 191        movl    %ecx, %ss:-2*4(%eax)
 192
 193        movl    2*4(%esp), %ecx         # ip
 194        movl    %ecx, %ss:-3*4(%eax)
 195
 196        movl    1*4(%esp), %ecx         # eax
 197        movl    %ecx, %ss:-4*4(%eax)
 198
 199        popl    %ecx
 200        lea     -4*4(%eax), %esp
 201        popl    %eax
 202.Lfinished_frame_\@:
 203.endm
 204
 205.macro SAVE_ALL pt_regs_ax=%eax switch_stacks=0 skip_gs=0 unwind_espfix=0
 206        cld
 207.if \skip_gs == 0
 208        pushl   $0
 209.endif
 210        pushl   %fs
 211
 212        pushl   %eax
 213        movl    $(__KERNEL_PERCPU), %eax
 214        movl    %eax, %fs
 215.if \unwind_espfix > 0
 216        UNWIND_ESPFIX_STACK
 217.endif
 218        popl    %eax
 219
 220        FIXUP_FRAME
 221        pushl   %es
 222        pushl   %ds
 223        pushl   \pt_regs_ax
 224        pushl   %ebp
 225        pushl   %edi
 226        pushl   %esi
 227        pushl   %edx
 228        pushl   %ecx
 229        pushl   %ebx
 230        movl    $(__USER_DS), %edx
 231        movl    %edx, %ds
 232        movl    %edx, %es
 233        /* Switch to kernel stack if necessary */
 234.if \switch_stacks > 0
 235        SWITCH_TO_KERNEL_STACK
 236.endif
 237.endm
 238
 239.macro SAVE_ALL_NMI cr3_reg:req unwind_espfix=0
 240        SAVE_ALL unwind_espfix=\unwind_espfix
 241
 242        BUG_IF_WRONG_CR3
 243
 244        /*
 245         * Now switch the CR3 when PTI is enabled.
 246         *
  247         * We can enter with either user or kernel cr3; the code stores
  248         * the old cr3 in \cr3_reg and switches to the kernel cr3
  249         * if necessary.
 250         */
 251        SWITCH_TO_KERNEL_CR3 scratch_reg=\cr3_reg
 252
 253.Lend_\@:
 254.endm
 255
 256.macro RESTORE_INT_REGS
 257        popl    %ebx
 258        popl    %ecx
 259        popl    %edx
 260        popl    %esi
 261        popl    %edi
 262        popl    %ebp
 263        popl    %eax
 264.endm
 265
 266.macro RESTORE_REGS pop=0
 267        RESTORE_INT_REGS
 2681:      popl    %ds
 2692:      popl    %es
 2703:      popl    %fs
 271        addl    $(4 + \pop), %esp       /* pop the unused "gs" slot */
 272        IRET_FRAME
 273.pushsection .fixup, "ax"
 2744:      movl    $0, (%esp)
 275        jmp     1b
 2765:      movl    $0, (%esp)
 277        jmp     2b
 2786:      movl    $0, (%esp)
 279        jmp     3b
 280.popsection
 281        _ASM_EXTABLE(1b, 4b)
 282        _ASM_EXTABLE(2b, 5b)
 283        _ASM_EXTABLE(3b, 6b)
 284.endm
 285
 286.macro RESTORE_ALL_NMI cr3_reg:req pop=0
 287        /*
 288         * Now switch the CR3 when PTI is enabled.
 289         *
 290         * We enter with kernel cr3 and switch the cr3 to the value
  291         * stored in \cr3_reg, which is either a user or a kernel cr3.
 292         */
 293        ALTERNATIVE "jmp .Lswitched_\@", "", X86_FEATURE_PTI
 294
 295        testl   $PTI_SWITCH_MASK, \cr3_reg
 296        jz      .Lswitched_\@
 297
 298        /* User cr3 in \cr3_reg - write it to hardware cr3 */
 299        movl    \cr3_reg, %cr3
 300
 301.Lswitched_\@:
 302
 303        BUG_IF_WRONG_CR3
 304
 305        RESTORE_REGS pop=\pop
 306.endm
 307
 308.macro CHECK_AND_APPLY_ESPFIX
 309#ifdef CONFIG_X86_ESPFIX32
 310#define GDT_ESPFIX_OFFSET (GDT_ENTRY_ESPFIX_SS * 8)
 311#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + GDT_ESPFIX_OFFSET
 312
 313        ALTERNATIVE     "jmp .Lend_\@", "", X86_BUG_ESPFIX
 314
 315        movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
 316        /*
 317         * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
 318         * are returning to the kernel.
 319         * See comments in process.c:copy_thread() for details.
 320         */
 321        movb    PT_OLDSS(%esp), %ah
 322        movb    PT_CS(%esp), %al
 323        andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
 324        cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
 325        jne     .Lend_\@        # returning to user-space with LDT SS
 326
 327        /*
 328         * Setup and switch to ESPFIX stack
 329         *
 330         * We're returning to userspace with a 16 bit stack. The CPU will not
 331         * restore the high word of ESP for us on executing iret... This is an
 332         * "official" bug of all the x86-compatible CPUs, which we can work
 333         * around to make dosemu and wine happy. We do this by preloading the
 334         * high word of ESP with the high word of the userspace ESP while
 335         * compensating for the offset by changing to the ESPFIX segment with
  336         * a base address that accounts for the difference.
 337         */
 338        mov     %esp, %edx                      /* load kernel esp */
 339        mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
 340        mov     %dx, %ax                        /* eax: new kernel esp */
 341        sub     %eax, %edx                      /* offset (low word is 0) */
 342        shr     $16, %edx
 343        mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
 344        mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
 345        pushl   $__ESPFIX_SS
 346        pushl   %eax                            /* new kernel esp */
 347        /*
 348         * Disable interrupts, but do not irqtrace this section: we
 349         * will soon execute iret and the tracer was already set to
 350         * the irqstate after the IRET:
 351         */
 352        cli
 353        lss     (%esp), %esp                    /* switch to espfix segment */
 354.Lend_\@:
 355#endif /* CONFIG_X86_ESPFIX32 */
 356.endm
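/*
 * The ESPFIX arithmetic above, restated as a C sketch (illustrative names,
 * not kernel API): the new ESP keeps the kernel stack's low word but takes
 * the user's high word, and the difference is programmed into bits 16..31 of
 * the __ESPFIX_SS segment base so that base + new ESP still points at the
 * kernel stack.
 *
 *	static unsigned int espfix_new_esp(unsigned int kernel_esp,
 *					   unsigned int user_esp)
 *	{
 *		return (user_esp & 0xffff0000u) | (kernel_esp & 0xffffu);
 *	}
 *
 *	static unsigned int espfix_base_offset(unsigned int kernel_esp,
 *					       unsigned int new_esp)
 *	{
 *		return kernel_esp - new_esp;	// low word is zero by construction
 *	}
 */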
 357
 358/*
 359 * Called with pt_regs fully populated and kernel segments loaded,
 360 * so we can access PER_CPU and use the integer registers.
 361 *
 362 * We need to be very careful here with the %esp switch, because an NMI
  363 * can happen anywhere. If the NMI handler finds itself on the
 364 * entry-stack, it will overwrite the task-stack and everything we
 365 * copied there. So allocate the stack-frame on the task-stack and
 366 * switch to it before we do any copying.
 367 */
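/*
 * The "are we on the entry stack?" test used below (and again in the NMI
 * handler) is a single unsigned comparison that covers both bounds; roughly:
 *
 *	static inline int on_entry_stack(unsigned long sp,
 *					 unsigned long stack_end,	// one past the top
 *					 unsigned long stack_size)
 *	{
 *		// underflows to a huge value when sp lies above stack_end
 *		return (stack_end - sp) < stack_size;
 *	}
 */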
 368
 369.macro SWITCH_TO_KERNEL_STACK
 370
 371        BUG_IF_WRONG_CR3
 372
 373        SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
 374
 375        /*
 376         * %eax now contains the entry cr3 and we carry it forward in
 377         * that register for the time this macro runs
 378         */
 379
 380        /* Are we on the entry stack? Bail out if not! */
 381        movl    PER_CPU_VAR(cpu_entry_area), %ecx
 382        addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
 383        subl    %esp, %ecx      /* ecx = (end of entry_stack) - esp */
 384        cmpl    $SIZEOF_entry_stack, %ecx
 385        jae     .Lend_\@
 386
 387        /* Load stack pointer into %esi and %edi */
 388        movl    %esp, %esi
 389        movl    %esi, %edi
 390
 391        /* Move %edi to the top of the entry stack */
 392        andl    $(MASK_entry_stack), %edi
 393        addl    $(SIZEOF_entry_stack), %edi
 394
 395        /* Load top of task-stack into %edi */
 396        movl    TSS_entry2task_stack(%edi), %edi
 397
 398        /* Special case - entry from kernel mode via entry stack */
 399#ifdef CONFIG_VM86
 400        movl    PT_EFLAGS(%esp), %ecx           # mix EFLAGS and CS
 401        movb    PT_CS(%esp), %cl
 402        andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %ecx
 403#else
 404        movl    PT_CS(%esp), %ecx
 405        andl    $SEGMENT_RPL_MASK, %ecx
 406#endif
 407        cmpl    $USER_RPL, %ecx
 408        jb      .Lentry_from_kernel_\@
 409
 410        /* Bytes to copy */
 411        movl    $PTREGS_SIZE, %ecx
 412
 413#ifdef CONFIG_VM86
 414        testl   $X86_EFLAGS_VM, PT_EFLAGS(%esi)
 415        jz      .Lcopy_pt_regs_\@
 416
 417        /*
 418         * Stack-frame contains 4 additional segment registers when
 419         * coming from VM86 mode
 420         */
 421        addl    $(4 * 4), %ecx
 422
 423#endif
 424.Lcopy_pt_regs_\@:
 425
 426        /* Allocate frame on task-stack */
 427        subl    %ecx, %edi
 428
 429        /* Switch to task-stack */
 430        movl    %edi, %esp
 431
 432        /*
 433         * We are now on the task-stack and can safely copy over the
 434         * stack-frame
 435         */
 436        shrl    $2, %ecx
 437        cld
 438        rep movsl
 439
 440        jmp .Lend_\@
 441
 442.Lentry_from_kernel_\@:
 443
 444        /*
 445         * This handles the case when we enter the kernel from
 446         * kernel-mode and %esp points to the entry-stack. When this
 447         * happens we need to switch to the task-stack to run C code,
 448         * but switch back to the entry-stack again when we approach
 449         * iret and return to the interrupted code-path. This usually
 450         * happens when we hit an exception while restoring user-space
 451         * segment registers on the way back to user-space or when the
 452         * sysenter handler runs with eflags.tf set.
 453         *
 454         * When we switch to the task-stack here, we can't trust the
 455         * contents of the entry-stack anymore, as the exception handler
 456         * might be scheduled out or moved to another CPU. Therefore we
 457         * copy the complete entry-stack to the task-stack and set a
 458         * marker in the iret-frame (bit 31 of the CS dword) to detect
 459         * what we've done on the iret path.
 460         *
 461         * On the iret path we copy everything back and switch to the
 462         * entry-stack, so that the interrupted kernel code-path
 463         * continues on the same stack it was interrupted with.
 464         *
 465         * Be aware that an NMI can happen anytime in this code.
 466         *
 467         * %esi: Entry-Stack pointer (same as %esp)
 468         * %edi: Top of the task stack
 469         * %eax: CR3 on kernel entry
 470         */
 471
 472        /* Calculate number of bytes on the entry stack in %ecx */
 473        movl    %esi, %ecx
 474
 475        /* %ecx to the top of entry-stack */
 476        andl    $(MASK_entry_stack), %ecx
 477        addl    $(SIZEOF_entry_stack), %ecx
 478
 479        /* Number of bytes on the entry stack to %ecx */
 480        sub     %esi, %ecx
 481
 482        /* Mark stackframe as coming from entry stack */
 483        orl     $CS_FROM_ENTRY_STACK, PT_CS(%esp)
 484
 485        /*
 486         * Test the cr3 used to enter the kernel and add a marker
 487         * so that we can switch back to it before iret.
 488         */
 489        testl   $PTI_SWITCH_MASK, %eax
 490        jz      .Lcopy_pt_regs_\@
 491        orl     $CS_FROM_USER_CR3, PT_CS(%esp)
 492
 493        /*
 494         * %esi and %edi are unchanged, %ecx contains the number of
 495         * bytes to copy. The code at .Lcopy_pt_regs_\@ will allocate
 496         * the stack-frame on task-stack and copy everything over
 497         */
 498        jmp .Lcopy_pt_regs_\@
 499
 500.Lend_\@:
 501.endm
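/*
 * A rough C model of the kernel-mode-via-entry-stack bookkeeping done in
 * .Lentry_from_kernel_\@ above (illustrative names only):
 *
 *	static void mark_kernel_entry(unsigned long *regs_cs,
 *				      unsigned long entry_cr3)
 *	{
 *		*regs_cs |= CS_FROM_ENTRY_STACK;	// undo the stack switch on iret
 *		if (entry_cr3 & PTI_SWITCH_MASK)
 *			*regs_cs |= CS_FROM_USER_CR3;	// also restore user cr3 on iret
 *	}
 */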
 502
 503/*
 504 * Switch back from the kernel stack to the entry stack.
 505 *
 506 * The %esp register must point to pt_regs on the task stack. It will
 507 * first calculate the size of the stack-frame to copy, depending on
 508 * whether we return to VM86 mode or not. With that it uses 'rep movsl'
 509 * to copy the contents of the stack over to the entry stack.
 510 *
 511 * We must be very careful here, as we can't trust the contents of the
 512 * task-stack once we switched to the entry-stack. When an NMI happens
 513 * while on the entry-stack, the NMI handler will switch back to the top
  514 * of the task stack, overwriting the stack-frame we are about to copy.
 515 * Therefore we switch the stack only after everything is copied over.
 516 */
 517.macro SWITCH_TO_ENTRY_STACK
 518
 519        /* Bytes to copy */
 520        movl    $PTREGS_SIZE, %ecx
 521
 522#ifdef CONFIG_VM86
 523        testl   $(X86_EFLAGS_VM), PT_EFLAGS(%esp)
 524        jz      .Lcopy_pt_regs_\@
 525
 526        /* Additional 4 registers to copy when returning to VM86 mode */
 527        addl    $(4 * 4), %ecx
 528
 529.Lcopy_pt_regs_\@:
 530#endif
 531
 532        /* Initialize source and destination for movsl */
 533        movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
 534        subl    %ecx, %edi
 535        movl    %esp, %esi
 536
 537        /* Save future stack pointer in %ebx */
 538        movl    %edi, %ebx
 539
 540        /* Copy over the stack-frame */
 541        shrl    $2, %ecx
 542        cld
 543        rep movsl
 544
 545        /*
 546         * Switch to entry-stack - needs to happen after everything is
 547         * copied because the NMI handler will overwrite the task-stack
 548         * when on entry-stack
 549         */
 550        movl    %ebx, %esp
 551
 552.Lend_\@:
 553.endm
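/*
 * Copy first and switch %esp only afterwards: once we are on the entry
 * stack, an NMI would switch to (and scribble over) the task stack we still
 * need to copy from.  As a C sketch (illustrative names, sizes in bytes):
 *
 *	static void switch_to_entry_stack_model(unsigned long *esp,
 *						unsigned long entry_stack_top,
 *						unsigned long frame_bytes)
 *	{
 *		unsigned long dst = entry_stack_top - frame_bytes;
 *
 *		memcpy((void *)dst, (void *)*esp, frame_bytes);	// the rep movsl above
 *		*esp = dst;					// publish last
 *	}
 */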
 554
 555/*
 556 * This macro handles the case when we return to kernel-mode on the iret
 557 * path and have to switch back to the entry stack and/or user-cr3
 558 *
 559 * See the comments below the .Lentry_from_kernel_\@ label in the
 560 * SWITCH_TO_KERNEL_STACK macro for more details.
 561 */
 562.macro PARANOID_EXIT_TO_KERNEL_MODE
 563
 564        /*
 565         * Test if we entered the kernel with the entry-stack. Most
 566         * likely we did not, because this code only runs on the
 567         * return-to-kernel path.
 568         */
 569        testl   $CS_FROM_ENTRY_STACK, PT_CS(%esp)
 570        jz      .Lend_\@
 571
 572        /* Unlikely slow-path */
 573
 574        /* Clear marker from stack-frame */
 575        andl    $(~CS_FROM_ENTRY_STACK), PT_CS(%esp)
 576
 577        /* Copy the remaining task-stack contents to entry-stack */
 578        movl    %esp, %esi
 579        movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %edi
 580
 581        /* Bytes on the task-stack to ecx */
 582        movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp1), %ecx
 583        subl    %esi, %ecx
 584
 585        /* Allocate stack-frame on entry-stack */
 586        subl    %ecx, %edi
 587
 588        /*
 589         * Save future stack-pointer, we must not switch until the
 590         * copy is done, otherwise the NMI handler could destroy the
 591         * contents of the task-stack we are about to copy.
 592         */
 593        movl    %edi, %ebx
 594
 595        /* Do the copy */
 596        shrl    $2, %ecx
 597        cld
 598        rep movsl
 599
 600        /* Safe to switch to entry-stack now */
 601        movl    %ebx, %esp
 602
 603        /*
 604         * We came from entry-stack and need to check if we also need to
 605         * switch back to user cr3.
 606         */
 607        testl   $CS_FROM_USER_CR3, PT_CS(%esp)
 608        jz      .Lend_\@
 609
 610        /* Clear marker from stack-frame */
 611        andl    $(~CS_FROM_USER_CR3), PT_CS(%esp)
 612
 613        SWITCH_TO_USER_CR3 scratch_reg=%eax
 614
 615.Lend_\@:
 616.endm
 617
 618/**
 619 * idtentry - Macro to generate entry stubs for simple IDT entries
 620 * @vector:             Vector number
 621 * @asmsym:             ASM symbol for the entry point
 622 * @cfunc:              C function to be called
 623 * @has_error_code:     Hardware pushed error code on stack
 624 */
 625.macro idtentry vector asmsym cfunc has_error_code:req
 626SYM_CODE_START(\asmsym)
 627        ASM_CLAC
 628        cld
 629
 630        .if \has_error_code == 0
 631                pushl   $0              /* Clear the error code */
 632        .endif
 633
 634        /* Push the C-function address into the GS slot */
 635        pushl   $\cfunc
 636        /* Invoke the common exception entry */
 637        jmp     handle_exception
 638SYM_CODE_END(\asmsym)
 639.endm
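/*
 * The C functions reached through the pushed $\cfunc pointer are the
 * DEFINE_IDTENTRY* handlers declared in <asm/idtentry.h>; their shape is
 * roughly (hypothetical example names):
 *
 *	void exc_example_trap(struct pt_regs *regs);		// has_error_code=0
 *	void exc_example_fault(struct pt_regs *regs,
 *			       unsigned long error_code);	// has_error_code=1
 *
 * handle_exception below hands over the pt_regs pointer and the saved error
 * code (zero when none was pushed by hardware).
 */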
 640
 641.macro idtentry_irq vector cfunc
 642        .p2align CONFIG_X86_L1_CACHE_SHIFT
 643SYM_CODE_START_LOCAL(asm_\cfunc)
 644        ASM_CLAC
 645        SAVE_ALL switch_stacks=1
 646        ENCODE_FRAME_POINTER
 647        movl    %esp, %eax
 648        movl    PT_ORIG_EAX(%esp), %edx         /* get the vector from stack */
 649        movl    $-1, PT_ORIG_EAX(%esp)          /* no syscall to restart */
 650        call    \cfunc
 651        jmp     handle_exception_return
 652SYM_CODE_END(asm_\cfunc)
 653.endm
 654
 655.macro idtentry_sysvec vector cfunc
 656        idtentry \vector asm_\cfunc \cfunc has_error_code=0
 657.endm
 658
 659/*
 660 * Include the defines which emit the idt entries which are shared
  661 * between 32 and 64 bit and emit the __irqentry_text_* markers
 662 * so the stacktrace boundary checks work.
 663 */
 664        .align 16
 665        .globl __irqentry_text_start
 666__irqentry_text_start:
 667
 668#include <asm/idtentry.h>
 669
 670        .align 16
 671        .globl __irqentry_text_end
 672__irqentry_text_end:
 673
 674/*
 675 * %eax: prev task
 676 * %edx: next task
 677 */
 678.pushsection .text, "ax"
 679SYM_CODE_START(__switch_to_asm)
 680        /*
 681         * Save callee-saved registers
 682         * This must match the order in struct inactive_task_frame
 683         */
 684        pushl   %ebp
 685        pushl   %ebx
 686        pushl   %edi
 687        pushl   %esi
 688        /*
 689         * Flags are saved to prevent AC leakage. This could go
  690         * away if objtool had 32-bit support to verify
  691         * STAC/CLAC correctness.
 692         */
 693        pushfl
 694
 695        /* switch stack */
 696        movl    %esp, TASK_threadsp(%eax)
 697        movl    TASK_threadsp(%edx), %esp
 698
 699#ifdef CONFIG_STACKPROTECTOR
 700        movl    TASK_stack_canary(%edx), %ebx
 701        movl    %ebx, PER_CPU_VAR(__stack_chk_guard)
 702#endif
 703
 704#ifdef CONFIG_RETPOLINE
 705        /*
 706         * When switching from a shallower to a deeper call stack
 707         * the RSB may either underflow or use entries populated
 708         * with userspace addresses. On CPUs where those concerns
 709         * exist, overwrite the RSB with entries which capture
 710         * speculative execution to prevent attack.
 711         */
 712        FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 713#endif
 714
  715        /* Restore flags of the incoming task to restore AC state. */
 716        popfl
 717        /* restore callee-saved registers */
 718        popl    %esi
 719        popl    %edi
 720        popl    %ebx
 721        popl    %ebp
 722
 723        jmp     __switch_to
 724SYM_CODE_END(__switch_to_asm)
 725.popsection
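/*
 * The push order above mirrors the 32-bit layout of struct inactive_task_frame
 * (see <asm/switch_to.h>); roughly:
 *
 *	struct inactive_task_frame {
 *		unsigned long flags;	// pushfl, lowest address
 *		unsigned long si;
 *		unsigned long di;
 *		unsigned long bx;
 *		unsigned long bp;
 *		unsigned long ret_addr;	// pushed by the call to __switch_to_asm
 *	};
 */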
 726
 727/*
 728 * The unwinder expects the last frame on the stack to always be at the same
 729 * offset from the end of the page, which allows it to validate the stack.
  730 * Calling schedule_tail() directly would break that convention because it's an
  731 * asmlinkage function, so its argument has to be pushed on the stack.  This
 732 * wrapper creates a proper "end of stack" frame header before the call.
 733 */
 734.pushsection .text, "ax"
 735SYM_FUNC_START(schedule_tail_wrapper)
 736        FRAME_BEGIN
 737
 738        pushl   %eax
 739        call    schedule_tail
 740        popl    %eax
 741
 742        FRAME_END
 743        ret
 744SYM_FUNC_END(schedule_tail_wrapper)
 745.popsection
 746
 747/*
 748 * A newly forked process directly context switches into this address.
 749 *
 750 * eax: prev task we switched from
 751 * ebx: kernel thread func (NULL for user thread)
 752 * edi: kernel thread arg
 753 */
 754.pushsection .text, "ax"
 755SYM_CODE_START(ret_from_fork)
 756        call    schedule_tail_wrapper
 757
 758        testl   %ebx, %ebx
 759        jnz     1f              /* kernel threads are uncommon */
 760
 7612:
 762        /* When we fork, we trace the syscall return in the child, too. */
 763        movl    %esp, %eax
 764        call    syscall_exit_to_user_mode
 765        jmp     .Lsyscall_32_done
 766
 767        /* kernel thread */
 7681:      movl    %edi, %eax
 769        CALL_NOSPEC ebx
 770        /*
 771         * A kernel thread is allowed to return here after successfully
 772         * calling kernel_execve().  Exit to userspace to complete the execve()
 773         * syscall.
 774         */
 775        movl    $0, PT_EAX(%esp)
 776        jmp     2b
 777SYM_CODE_END(ret_from_fork)
 778.popsection
 779
 780SYM_ENTRY(__begin_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 781/*
 782 * All code from here through __end_SYSENTER_singlestep_region is subject
 783 * to being single-stepped if a user program sets TF and executes SYSENTER.
 784 * There is absolutely nothing that we can do to prevent this from happening
 785 * (thanks Intel!).  To keep our handling of this situation as simple as
 786 * possible, we handle TF just like AC and NT, except that our #DB handler
 787 * will ignore all of the single-step traps generated in this range.
 788 */
 789
 790/*
 791 * 32-bit SYSENTER entry.
 792 *
 793 * 32-bit system calls through the vDSO's __kernel_vsyscall enter here
 794 * if X86_FEATURE_SEP is available.  This is the preferred system call
 795 * entry on 32-bit systems.
 796 *
 797 * The SYSENTER instruction, in principle, should *only* occur in the
 798 * vDSO.  In practice, a small number of Android devices were shipped
 799 * with a copy of Bionic that inlined a SYSENTER instruction.  This
 800 * never happened in any of Google's Bionic versions -- it only happened
 801 * in a narrow range of Intel-provided versions.
 802 *
 803 * SYSENTER loads SS, ESP, CS, and EIP from previously programmed MSRs.
 804 * IF and VM in RFLAGS are cleared (IOW: interrupts are off).
 805 * SYSENTER does not save anything on the stack,
 806 * and does not save old EIP (!!!), ESP, or EFLAGS.
 807 *
 808 * To avoid losing track of EFLAGS.VM (and thus potentially corrupting
 809 * user and/or vm86 state), we explicitly disable the SYSENTER
 810 * instruction in vm86 mode by reprogramming the MSRs.
 811 *
 812 * Arguments:
 813 * eax  system call number
 814 * ebx  arg1
 815 * ecx  arg2
 816 * edx  arg3
 817 * esi  arg4
 818 * edi  arg5
 819 * ebp  user stack
 820 * 0(%ebp) arg6
 821 */
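/*
 * The "previously programmed MSRs" are IA32_SYSENTER_{CS,ESP,EIP}.  CPU
 * bringup points them at the kernel along these lines (a sketch in the
 * spirit of enable_sep_cpu(); the exact expressions differ):
 *
 *	wrmsr(MSR_IA32_SYSENTER_CS,  __KERNEL_CS, 0);
 *	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(entry_stack + 1), 0);
 *	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 *
 * vm86 mode disables SYSENTER by zeroing IA32_SYSENTER_CS, which makes the
 * instruction fault instead of entering the kernel with bogus state.
 */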
 822SYM_FUNC_START(entry_SYSENTER_32)
 823        /*
 824         * On entry-stack with all userspace-regs live - save and
 825         * restore eflags and %eax to use it as scratch-reg for the cr3
 826         * switch.
 827         */
 828        pushfl
 829        pushl   %eax
 830        BUG_IF_WRONG_CR3 no_user_check=1
 831        SWITCH_TO_KERNEL_CR3 scratch_reg=%eax
 832        popl    %eax
 833        popfl
 834
 835        /* Stack empty again, switch to task stack */
 836        movl    TSS_entry2task_stack(%esp), %esp
 837
 838.Lsysenter_past_esp:
 839        pushl   $__USER_DS              /* pt_regs->ss */
 840        pushl   $0                      /* pt_regs->sp (placeholder) */
 841        pushfl                          /* pt_regs->flags (except IF = 0) */
 842        pushl   $__USER_CS              /* pt_regs->cs */
 843        pushl   $0                      /* pt_regs->ip = 0 (placeholder) */
 844        pushl   %eax                    /* pt_regs->orig_ax */
 845        SAVE_ALL pt_regs_ax=$-ENOSYS    /* save rest, stack already switched */
 846
 847        /*
 848         * SYSENTER doesn't filter flags, so we need to clear NT, AC
 849         * and TF ourselves.  To save a few cycles, we can check whether
  850         * either was set instead of doing an unconditional popfl.
 851         * This needs to happen before enabling interrupts so that
 852         * we don't get preempted with NT set.
 853         *
 854         * If TF is set, we will single-step all the way to here -- do_debug
 855         * will ignore all the traps.  (Yes, this is slow, but so is
 856         * single-stepping in general.  This allows us to avoid having
  857         * more complicated code to handle the case where a user program
 858         * forces us to single-step through the SYSENTER entry code.)
 859         *
 860         * NB.: .Lsysenter_fix_flags is a label with the code under it moved
 861         * out-of-line as an optimization: NT is unlikely to be set in the
 862         * majority of the cases and instead of polluting the I$ unnecessarily,
 863         * we're keeping that code behind a branch which will predict as
 864         * not-taken and therefore its instructions won't be fetched.
 865         */
 866        testl   $X86_EFLAGS_NT|X86_EFLAGS_AC|X86_EFLAGS_TF, PT_EFLAGS(%esp)
 867        jnz     .Lsysenter_fix_flags
 868.Lsysenter_flags_fixed:
 869
 870        movl    %esp, %eax
 871        call    do_SYSENTER_32
 872        testl   %eax, %eax
 873        jz      .Lsyscall_32_done
 874
 875        STACKLEAK_ERASE
 876
 877        /* Opportunistic SYSEXIT */
 878
 879        /*
 880         * Setup entry stack - we keep the pointer in %eax and do the
 881         * switch after almost all user-state is restored.
 882         */
 883
 884        /* Load entry stack pointer and allocate frame for eflags/eax */
 885        movl    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %eax
 886        subl    $(2*4), %eax
 887
 888        /* Copy eflags and eax to entry stack */
 889        movl    PT_EFLAGS(%esp), %edi
 890        movl    PT_EAX(%esp), %esi
 891        movl    %edi, (%eax)
 892        movl    %esi, 4(%eax)
 893
 894        /* Restore user registers and segments */
 895        movl    PT_EIP(%esp), %edx      /* pt_regs->ip */
 896        movl    PT_OLDESP(%esp), %ecx   /* pt_regs->sp */
 8971:      mov     PT_FS(%esp), %fs
 898
 899        popl    %ebx                    /* pt_regs->bx */
 900        addl    $2*4, %esp              /* skip pt_regs->cx and pt_regs->dx */
 901        popl    %esi                    /* pt_regs->si */
 902        popl    %edi                    /* pt_regs->di */
 903        popl    %ebp                    /* pt_regs->bp */
 904
 905        /* Switch to entry stack */
 906        movl    %eax, %esp
 907
 908        /* Now ready to switch the cr3 */
 909        SWITCH_TO_USER_CR3 scratch_reg=%eax
 910
 911        /*
 912         * Restore all flags except IF. (We restore IF separately because
 913         * STI gives a one-instruction window in which we won't be interrupted,
 914         * whereas POPF does not.)
 915         */
 916        btrl    $X86_EFLAGS_IF_BIT, (%esp)
 917        BUG_IF_WRONG_CR3 no_user_check=1
 918        popfl
 919        popl    %eax
 920
 921        /*
  922         * Return to the vDSO, which will pop ecx and edx.
 923         * Don't bother with DS and ES (they already contain __USER_DS).
 924         */
 925        sti
 926        sysexit
 927
 928.pushsection .fixup, "ax"
 9292:      movl    $0, PT_FS(%esp)
 930        jmp     1b
 931.popsection
 932        _ASM_EXTABLE(1b, 2b)
 933
 934.Lsysenter_fix_flags:
 935        pushl   $X86_EFLAGS_FIXED
 936        popfl
 937        jmp     .Lsysenter_flags_fixed
 938SYM_ENTRY(__end_SYSENTER_singlestep_region, SYM_L_GLOBAL, SYM_A_NONE)
 939SYM_FUNC_END(entry_SYSENTER_32)
 940
 941/*
 942 * 32-bit legacy system call entry.
 943 *
 944 * 32-bit x86 Linux system calls traditionally used the INT $0x80
 945 * instruction.  INT $0x80 lands here.
 946 *
  947 * This entry point can be used by any 32-bit program to perform system calls.
 948 * Instances of INT $0x80 can be found inline in various programs and
 949 * libraries.  It is also used by the vDSO's __kernel_vsyscall
 950 * fallback for hardware that doesn't support a faster entry method.
 951 * Restarted 32-bit system calls also fall back to INT $0x80
 952 * regardless of what instruction was originally used to do the system
 953 * call.  (64-bit programs can use INT $0x80 as well, but they can
 954 * only run on 64-bit kernels and therefore land in
 955 * entry_INT80_compat.)
 956 *
 957 * This is considered a slow path.  It is not used by most libc
 958 * implementations on modern hardware except during process startup.
 959 *
 960 * Arguments:
 961 * eax  system call number
 962 * ebx  arg1
 963 * ecx  arg2
 964 * edx  arg3
 965 * esi  arg4
 966 * edi  arg5
 967 * ebp  arg6
 968 */
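/*
 * From userspace the register convention above looks like this; a minimal
 * inline-asm example of a three-argument call (__NR_write is 4 in the i386
 * ABI):
 *
 *	static long int80_syscall3(long nr, long a1, long a2, long a3)
 *	{
 *		long ret;
 *
 *		asm volatile ("int $0x80"
 *			      : "=a" (ret)
 *			      : "a" (nr), "b" (a1), "c" (a2), "d" (a3)
 *			      : "memory");
 *		return ret;
 *	}
 *
 *	// e.g. int80_syscall3(4, 1, (long)"hi\n", 3) writes to stdout
 */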
 969SYM_FUNC_START(entry_INT80_32)
 970        ASM_CLAC
 971        pushl   %eax                    /* pt_regs->orig_ax */
 972
 973        SAVE_ALL pt_regs_ax=$-ENOSYS switch_stacks=1    /* save rest */
 974
 975        movl    %esp, %eax
 976        call    do_int80_syscall_32
 977.Lsyscall_32_done:
 978        STACKLEAK_ERASE
 979
 980restore_all_switch_stack:
 981        SWITCH_TO_ENTRY_STACK
 982        CHECK_AND_APPLY_ESPFIX
 983
 984        /* Switch back to user CR3 */
 985        SWITCH_TO_USER_CR3 scratch_reg=%eax
 986
 987        BUG_IF_WRONG_CR3
 988
 989        /* Restore user state */
 990        RESTORE_REGS pop=4                      # skip orig_eax/error_code
 991.Lirq_return:
 992        /*
  993         * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on IRET core serialization
 994         * when returning from IPI handler and when returning from
 995         * scheduler to user-space.
 996         */
 997        iret
 998
 999.section .fixup, "ax"
1000SYM_CODE_START(asm_iret_error)
1001        pushl   $0                              # no error code
1002        pushl   $iret_error
1003
1004#ifdef CONFIG_DEBUG_ENTRY
1005        /*
 1006         * The stack-frame here is the one that iret faulted on, so it's a
1007         * return-to-user frame. We are on kernel-cr3 because we come here from
1008         * the fixup code. This confuses the CR3 checker, so switch to user-cr3
1009         * as the checker expects it.
1010         */
1011        pushl   %eax
1012        SWITCH_TO_USER_CR3 scratch_reg=%eax
1013        popl    %eax
1014#endif
1015
1016        jmp     handle_exception
1017SYM_CODE_END(asm_iret_error)
1018.previous
1019        _ASM_EXTABLE(.Lirq_return, asm_iret_error)
1020SYM_FUNC_END(entry_INT80_32)
1021
1022.macro FIXUP_ESPFIX_STACK
1023/*
 1024 * Switch back from the ESPFIX stack to the normal zero-based stack
1025 *
1026 * We can't call C functions using the ESPFIX stack. This code reads
 1027 * the high word of the segment base from the GDT and switches to the
1028 * normal stack and adjusts ESP with the matching offset.
1029 *
1030 * We might be on user CR3 here, so percpu data is not mapped and we can't
1031 * access the GDT through the percpu segment.  Instead, use SGDT to find
1032 * the cpu_entry_area alias of the GDT.
1033 */
1034#ifdef CONFIG_X86_ESPFIX32
1035        /* fixup the stack */
1036        pushl   %ecx
1037        subl    $2*4, %esp
1038        sgdt    (%esp)
1039        movl    2(%esp), %ecx                           /* GDT address */
1040        /*
1041         * Careful: ECX is a linear pointer, so we need to force base
1042         * zero.  %cs is the only known-linear segment we have right now.
1043         */
1044        mov     %cs:GDT_ESPFIX_OFFSET + 4(%ecx), %al    /* bits 16..23 */
1045        mov     %cs:GDT_ESPFIX_OFFSET + 7(%ecx), %ah    /* bits 24..31 */
1046        shl     $16, %eax
1047        addl    $2*4, %esp
1048        popl    %ecx
1049        addl    %esp, %eax                      /* the adjusted stack pointer */
1050        pushl   $__KERNEL_DS
1051        pushl   %eax
1052        lss     (%esp), %esp                    /* switch to the normal stack segment */
1053#endif
1054.endm
1055
1056.macro UNWIND_ESPFIX_STACK
1057        /* It's safe to clobber %eax, all other regs need to be preserved */
1058#ifdef CONFIG_X86_ESPFIX32
1059        movl    %ss, %eax
1060        /* see if on espfix stack */
1061        cmpw    $__ESPFIX_SS, %ax
1062        jne     .Lno_fixup_\@
1063        /* switch to normal stack */
1064        FIXUP_ESPFIX_STACK
1065.Lno_fixup_\@:
1066#endif
1067.endm
1068
1069SYM_CODE_START_LOCAL_NOALIGN(handle_exception)
1070        /* the function address is in %gs's slot on the stack */
1071        SAVE_ALL switch_stacks=1 skip_gs=1 unwind_espfix=1
1072        ENCODE_FRAME_POINTER
1073
1074        movl    PT_GS(%esp), %edi               # get the function address
1075
1076        /* fixup orig %eax */
1077        movl    PT_ORIG_EAX(%esp), %edx         # get the error code
1078        movl    $-1, PT_ORIG_EAX(%esp)          # no syscall to restart
1079
1080        movl    %esp, %eax                      # pt_regs pointer
1081        CALL_NOSPEC edi
1082
1083handle_exception_return:
1084#ifdef CONFIG_VM86
1085        movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS and CS
1086        movb    PT_CS(%esp), %al
1087        andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
1088#else
1089        /*
 1090         * We can be coming here from a child spawned by kernel_thread().
1091         */
1092        movl    PT_CS(%esp), %eax
1093        andl    $SEGMENT_RPL_MASK, %eax
1094#endif
1095        cmpl    $USER_RPL, %eax                 # returning to v8086 or userspace ?
1096        jnb     ret_to_user
1097
1098        PARANOID_EXIT_TO_KERNEL_MODE
1099        BUG_IF_WRONG_CR3
1100        RESTORE_REGS 4
1101        jmp     .Lirq_return
1102
1103ret_to_user:
1104        movl    %esp, %eax
1105        jmp     restore_all_switch_stack
1106SYM_CODE_END(handle_exception)
1107
1108SYM_CODE_START(asm_exc_double_fault)
11091:
1110        /*
1111         * This is a task gate handler, not an interrupt gate handler.
1112         * The error code is on the stack, but the stack is otherwise
1113         * empty.  Interrupts are off.  Our state is sane with the following
1114         * exceptions:
1115         *
1116         *  - CR0.TS is set.  "TS" literally means "task switched".
1117         *  - EFLAGS.NT is set because we're a "nested task".
1118         *  - The doublefault TSS has back_link set and has been marked busy.
1119         *  - TR points to the doublefault TSS and the normal TSS is busy.
1120         *  - CR3 is the normal kernel PGD.  This would be delightful, except
1121         *    that the CPU didn't bother to save the old CR3 anywhere.  This
1122         *    would make it very awkward to return back to the context we came
1123         *    from.
1124         *
1125         * The rest of EFLAGS is sanitized for us, so we don't need to
1126         * worry about AC or DF.
1127         *
1128         * Don't even bother popping the error code.  It's always zero,
1129         * and ignoring it makes us a bit more robust against buggy
1130         * hypervisor task gate implementations.
1131         *
1132         * We will manually undo the task switch instead of doing a
1133         * task-switching IRET.
1134         */
1135
1136        clts                            /* clear CR0.TS */
1137        pushl   $X86_EFLAGS_FIXED
1138        popfl                           /* clear EFLAGS.NT */
1139
1140        call    doublefault_shim
1141
1142        /* We don't support returning, so we have no IRET here. */
11431:
1144        hlt
1145        jmp 1b
1146SYM_CODE_END(asm_exc_double_fault)
1147
1148/*
1149 * NMI is doubly nasty.  It can happen on the first instruction of
1150 * entry_SYSENTER_32 (just like #DB), but it can also interrupt the beginning
1151 * of the #DB handler even if that #DB in turn hit before entry_SYSENTER_32
1152 * switched stacks.  We handle both conditions by simply checking whether we
1153 * interrupted kernel code running on the SYSENTER stack.
1154 */
1155SYM_CODE_START(asm_exc_nmi)
1156        ASM_CLAC
1157
1158#ifdef CONFIG_X86_ESPFIX32
1159        /*
1160         * ESPFIX_SS is only ever set on the return to user path
1161         * after we've switched to the entry stack.
1162         */
1163        pushl   %eax
1164        movl    %ss, %eax
1165        cmpw    $__ESPFIX_SS, %ax
1166        popl    %eax
1167        je      .Lnmi_espfix_stack
1168#endif
1169
1170        pushl   %eax                            # pt_regs->orig_ax
1171        SAVE_ALL_NMI cr3_reg=%edi
1172        ENCODE_FRAME_POINTER
1173        xorl    %edx, %edx                      # zero error code
1174        movl    %esp, %eax                      # pt_regs pointer
1175
1176        /* Are we currently on the SYSENTER stack? */
1177        movl    PER_CPU_VAR(cpu_entry_area), %ecx
1178        addl    $CPU_ENTRY_AREA_entry_stack + SIZEOF_entry_stack, %ecx
1179        subl    %eax, %ecx      /* ecx = (end of entry_stack) - esp */
1180        cmpl    $SIZEOF_entry_stack, %ecx
1181        jb      .Lnmi_from_sysenter_stack
1182
1183        /* Not on SYSENTER stack. */
1184        call    exc_nmi
1185        jmp     .Lnmi_return
1186
1187.Lnmi_from_sysenter_stack:
1188        /*
1189         * We're on the SYSENTER stack.  Switch off.  No one (not even debug)
1190         * is using the thread stack right now, so it's safe for us to use it.
1191         */
1192        movl    %esp, %ebx
1193        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esp
1194        call    exc_nmi
1195        movl    %ebx, %esp
1196
1197.Lnmi_return:
1198#ifdef CONFIG_X86_ESPFIX32
1199        testl   $CS_FROM_ESPFIX, PT_CS(%esp)
1200        jnz     .Lnmi_from_espfix
1201#endif
1202
1203        CHECK_AND_APPLY_ESPFIX
1204        RESTORE_ALL_NMI cr3_reg=%edi pop=4
1205        jmp     .Lirq_return
1206
1207#ifdef CONFIG_X86_ESPFIX32
1208.Lnmi_espfix_stack:
1209        /*
 1210         * Create the ss:esp far pointer that the lss below uses to switch back
1211         */
1212        pushl   %ss
1213        pushl   %esp
1214        addl    $4, (%esp)
1215
1216        /* Copy the (short) IRET frame */
1217        pushl   4*4(%esp)       # flags
1218        pushl   4*4(%esp)       # cs
1219        pushl   4*4(%esp)       # ip
1220
1221        pushl   %eax            # orig_ax
1222
1223        SAVE_ALL_NMI cr3_reg=%edi unwind_espfix=1
1224        ENCODE_FRAME_POINTER
1225
1226        /* clear CS_FROM_KERNEL, set CS_FROM_ESPFIX */
1227        xorl    $(CS_FROM_ESPFIX | CS_FROM_KERNEL), PT_CS(%esp)
1228
1229        xorl    %edx, %edx                      # zero error code
1230        movl    %esp, %eax                      # pt_regs pointer
1231        jmp     .Lnmi_from_sysenter_stack
1232
1233.Lnmi_from_espfix:
1234        RESTORE_ALL_NMI cr3_reg=%edi
1235        /*
1236         * Because we cleared CS_FROM_KERNEL, IRET_FRAME 'forgot' to
1237         * fix up the gap and long frame:
1238         *
1239         *  3 - original frame  (exception)
1240         *  2 - ESPFIX block    (above)
1241         *  6 - gap             (FIXUP_FRAME)
1242         *  5 - long frame      (FIXUP_FRAME)
1243         *  1 - orig_ax
1244         */
1245        lss     (1+5+6)*4(%esp), %esp                   # back to espfix stack
1246        jmp     .Lirq_return
1247#endif
1248SYM_CODE_END(asm_exc_nmi)
1249
1250.pushsection .text, "ax"
1251SYM_CODE_START(rewind_stack_do_exit)
1252        /* Prevent any naive code from trying to unwind to our caller. */
1253        xorl    %ebp, %ebp
1254
1255        movl    PER_CPU_VAR(cpu_current_top_of_stack), %esi
1256        leal    -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
1257
1258        call    do_exit
12591:      jmp 1b
1260SYM_CODE_END(rewind_stack_do_exit)
1261.popsection
1262