linux/arch/x86/power/hibernate_64.c
/*
 * Hibernation support for x86-64
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>

#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mtrr.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/* References to section boundaries */
extern const void __nosave_begin, __nosave_end;

/* Defined in hibernate_asm_64.S */
extern int restore_image(void);

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3;

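/*
 * Physical address of the temporary top-level page table set up by
 * set_up_temporary_mappings(); the assembly restore code in
 * hibernate_asm_64.S switches to it before the image pages are copied back.
 */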
unsigned long temp_level4_pgt;

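/*
 * Address of the safe page that relocate_restore_code() copies
 * core_restore_code into; the restore path runs from this copy so that
 * the original location may be overwritten while the image is restored.
 */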
unsigned long relocated_restore_code;

static int set_up_temporary_text_mapping(pgd_t *pgd)
{
        pmd_t *pmd;
        pud_t *pud;

        /*
         * The new mapping only has to cover the page containing the image
         * kernel's entry point (jump_address_phys), because the switch over to
         * it is carried out by relocated code running from a page allocated
         * specifically for this purpose and covered by the identity mapping, so
         * the temporary kernel text mapping is only needed for the final jump.
         * Moreover, in that mapping the virtual address of the image kernel's
         * entry point must be the same as its virtual address in the image
         * kernel (restore_jump_address), so the image kernel's
         * restore_registers() code doesn't find itself in a different area of
         * the virtual address space after switching over to the original page
         * tables used by the image kernel.
         */
        pud = (pud_t *)get_safe_page(GFP_ATOMIC);
        if (!pud)
                return -ENOMEM;

        pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
        if (!pmd)
                return -ENOMEM;

        set_pmd(pmd + pmd_index(restore_jump_address),
                __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC));
        set_pud(pud + pud_index(restore_jump_address),
                __pud(__pa(pmd) | _KERNPG_TABLE));
        set_pgd(pgd + pgd_index(restore_jump_address),
                __pgd(__pa(pud) | _KERNPG_TABLE));

        return 0;
}

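/*
 * Page table allocation callback for kernel_ident_mapping_init(): page
 * tables for the temporary mapping must come from hibernation-safe pages
 * so they do not overlap any data that still has to be restored.
 */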
static void *alloc_pgt_page(void *context)
{
        return (void *)get_safe_page(GFP_ATOMIC);
}

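/*
 * Build the temporary page tables used during resume: the direct mapping
 * of every range in pfn_mapped[] plus the single kernel text mapping
 * created by set_up_temporary_text_mapping(), rooted at the page whose
 * physical address is stored in temp_level4_pgt.
 */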
static int set_up_temporary_mappings(void)
{
        struct x86_mapping_info info = {
                .alloc_pgt_page = alloc_pgt_page,
                .pmd_flag       = __PAGE_KERNEL_LARGE_EXEC,
                .offset         = __PAGE_OFFSET,
        };
        unsigned long mstart, mend;
        pgd_t *pgd;
        int result;
        int i;

        pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);
        if (!pgd)
                return -ENOMEM;

        /* Prepare a temporary mapping for the kernel text */
        result = set_up_temporary_text_mapping(pgd);
        if (result)
                return result;

        /* Set up the direct mapping from scratch */
        for (i = 0; i < nr_pfn_mapped; i++) {
                mstart = pfn_mapped[i].start << PAGE_SHIFT;
                mend   = pfn_mapped[i].end << PAGE_SHIFT;

                result = kernel_ident_mapping_init(&info, pgd, mstart, mend);
                if (result)
                        return result;
        }

        temp_level4_pgt = __pa(pgd);
        return 0;
}

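/*
 * Copy core_restore_code (defined in hibernate_asm_64.S) to a safe page
 * and clear the NX bit on that page so the copy can be executed; the
 * original location may be overwritten while the image pages are copied
 * back into place.
 */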
static int relocate_restore_code(void)
{
        pgd_t *pgd;
        pud_t *pud;

        relocated_restore_code = get_safe_page(GFP_ATOMIC);
        if (!relocated_restore_code)
                return -ENOMEM;

        memcpy((void *)relocated_restore_code, &core_restore_code, PAGE_SIZE);

        /* Make the page containing the relocated code executable */
        pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code);
        pud = pud_offset(pgd, relocated_restore_code);
        if (pud_large(*pud)) {
                set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
        } else {
                pmd_t *pmd = pmd_offset(pud, relocated_restore_code);

                if (pmd_large(*pmd)) {
                        set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
                } else {
                        pte_t *pte = pte_offset_kernel(pmd, relocated_restore_code);

                        set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
                }
        }
        __flush_tlb_all();

        return 0;
}

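/*
 * Prepare the temporary page tables and the relocated copy of the restore
 * code, then jump to the assembly entry point that copies the image pages
 * back and passes control to the image kernel.
 */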
int swsusp_arch_resume(void)
{
        int error;

        /* We have got enough memory and from now on we cannot recover */
        error = set_up_temporary_mappings();
        if (error)
                return error;

        error = relocate_restore_code();
        if (error)
                return error;

        restore_image();
        return 0;
}

/*
 *      pfn_is_nosave - check if given pfn is in the 'nosave' section
 */

int pfn_is_nosave(unsigned long pfn)
{
        unsigned long nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
        unsigned long nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;
        return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn);
}

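/*
 * Architecture-specific data stored in the hibernation image header by
 * arch_hibernation_header_save() and read back by
 * arch_hibernation_header_restore(); the magic value lets the boot kernel
 * reject an image whose header layout it does not understand.
 */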
struct restore_data_record {
        unsigned long jump_address;
        unsigned long jump_address_phys;
        unsigned long cr3;
        unsigned long magic;
};

#define RESTORE_MAGIC   0x123456789ABCDEF0UL

/**
 *      arch_hibernation_header_save - populate the architecture specific part
 *              of a hibernation image header
 *      @addr: address to save the data at
 *      @max_size: maximum size of the data that can be saved at @addr
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
        struct restore_data_record *rdr = addr;

        if (max_size < sizeof(struct restore_data_record))
                return -EOVERFLOW;
        rdr->jump_address = (unsigned long)&restore_registers;
        rdr->jump_address_phys = __pa_symbol(&restore_registers);
        rdr->cr3 = restore_cr3;
        rdr->magic = RESTORE_MAGIC;
        return 0;
}

/**
 *      arch_hibernation_header_restore - read the architecture specific data
 *              from the hibernation image header
 *      @addr: address to read the data from
 */
int arch_hibernation_header_restore(void *addr)
{
        struct restore_data_record *rdr = addr;

        restore_jump_address = rdr->jump_address;
        jump_address_phys = rdr->jump_address_phys;
        restore_cr3 = rdr->cr3;
        return (rdr->magic == RESTORE_MAGIC) ? 0 : -EINVAL;
}