linux/arch/x86/power/hibernate_asm_32.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This may not use any stack, nor any variable that is not "NoSave":
 *
 * It is rewriting one kernel image with another.  What is a stack page in
 * the "old" image could very well be a data page in the "new" image, and
 * overwriting your own stack out from under you is a bad idea.
 */

#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/page_types.h>
#include <asm/asm-offsets.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

.text

SYM_FUNC_START(swsusp_arch_suspend)
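        /*
         * Save the callee-saved registers and the flags in NoSave
         * variables; restore_registers() in the image kernel reloads
         * them once memory has been restored.
         */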
        movl %esp, saved_context_esp
        movl %ebx, saved_context_ebx
        movl %ebp, saved_context_ebp
        movl %esi, saved_context_esi
        movl %edi, saved_context_edi
        pushfl
        popl saved_context_eflags

        /* save cr3 */
        movl    %cr3, %eax
        movl    %eax, restore_cr3

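        /*
         * Call the C function that builds the in-memory hibernation image;
         * its return value is left in %eax and handed back to our caller.
         * FRAME_BEGIN/FRAME_END keep frame-pointer unwinding valid across
         * the call.
         */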
        FRAME_BEGIN
        call swsusp_save
        FRAME_END
        ret
SYM_FUNC_END(swsusp_arch_suspend)

SYM_CODE_START(restore_image)
        /* prepare to jump to the image kernel */
        movl    restore_jump_address, %ebx
        movl    restore_cr3, %ebp

        movl    mmu_cr4_features, %ecx

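        /*
         * core_restore_code may overwrite the pages it would otherwise be
         * executing from, so jump to the copy of it that the hibernation
         * core placed in a safe page.
         */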
        /* jump to relocated restore code */
        movl    relocated_restore_code, %eax
        jmpl    *%eax
SYM_CODE_END(restore_image)

/* code below has been relocated to a safe page */
SYM_CODE_START(core_restore_code)
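        /*
         * Switch to a temporary page directory that lives entirely in safe
         * pages, so the copy loop below cannot pull its own page tables
         * out from under itself.
         */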
        movl    temp_pgt, %eax
        movl    %eax, %cr3

        jecxz   1f      # skip if mmu_cr4_features is zero (no CR4 before Pentium)
        andl    $~(X86_CR4_PGE), %ecx
        movl    %ecx, %cr4   # turn off PGE
        movl    %cr3, %eax   # flush TLB
        movl    %eax, %cr3
1:
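        /*
         * restore_pblist heads a linked list of page backup entries; each
         * struct pbe (include/linux/suspend.h) holds the address of the
         * safe copy, the original address to restore it to, and a pointer
         * to the next entry.  The pbe_* offsets below come from
         * asm-offsets.
         */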
        movl    restore_pblist, %edx
        .p2align 4,,7

copy_loop:
        testl   %edx, %edx
        jz      done

        movl    pbe_address(%edx), %esi
        movl    pbe_orig_address(%edx), %edi

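        /*
         * Copy one page: PAGE_SIZE / 4 longwords from the safe copy
         * (%esi) back to the original location (%edi).
         */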
        movl    $(PAGE_SIZE >> 2), %ecx
        rep
        movsl

        movl    pbe_next(%edx), %edx
        jmp     copy_loop
        .p2align 4,,7

done:
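        /*
         * All pages are back in place: jump to restore_registers() in the
         * image kernel, whose address was loaded from restore_jump_address
         * into %ebx before the copy started.
         */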
        jmpl    *%ebx
SYM_CODE_END(core_restore_code)

        /* code below belongs to the image kernel */
        .align PAGE_SIZE
SYM_FUNC_START(restore_registers)
        /* go back to the original page tables */
        movl    %ebp, %cr3
        movl    mmu_cr4_features, %ecx
        jecxz   1f      # skip if mmu_cr4_features is zero (no CR4 before Pentium)
        movl    %ecx, %cr4   # turn PGE back on
1:

        movl saved_context_esp, %esp
        movl saved_context_ebp, %ebp
        movl saved_context_ebx, %ebx
        movl saved_context_esi, %esi
        movl saved_context_edi, %edi

        pushl saved_context_eflags
        popfl

        /* Reload the GDT descriptor saved in save_processor_state(). */
        movl $saved_context, %eax
        lgdt saved_context_gdt_desc(%eax)

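        /*
         * Zero %eax: it clears in_suspend below and also becomes the value
         * that the image kernel's swsusp_arch_suspend() call site sees
         * returned, indicating that we are resuming.
         */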
        xorl    %eax, %eax

        /* tell the hibernation core that we've just restored the memory */
        movl    %eax, in_suspend

        ret
SYM_FUNC_END(restore_registers)