/*
 *  linux/arch/unicore32/kernel/hibernate.c
 *
 * Code specific to PKUnity SoC and UniCore ISA
 *
 *	Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
 *	Copyright (C) 2001-2010 Guan Xuetao
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/gfp.h>
#include <linux/suspend.h>
#include <linux/bootmem.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/suspend.h>

#include "mach/pm.h"

/* Pointer to the temporary resume page tables */
pgd_t *resume_pg_dir;

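/* CPU register state saved/restored by the low-level hibernation code */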
struct swsusp_arch_regs swsusp_arch_regs_cpu0;

/*
 * Return the middle page table for the given global directory entry.  On
 * UniCore32 the pud and pmd levels are folded into the pgd, so no separate
 * table needs to be allocated; the pmd lives within the pgd entry itself.
 */
static pmd_t *resume_one_md_table_init(pgd_t *pgd)
{
	pud_t *pud;
	pmd_t *pmd_table;

	pud = pud_offset(pgd, 0);
	pmd_table = pmd_offset(pud, 0);

	return pmd_table;
}

/*
 * Create a page table on a resume-safe page and place a pointer to it in
 * a middle page directory entry.
 */
static pte_t *resume_one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC);
		if (!page_table)
			return NULL;

		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_KERNEL_TABLE));

		BUG_ON(page_table != pte_offset_kernel(pmd, 0));

		return page_table;
	}

	return pte_offset_kernel(pmd, 0);
}

/*
 * Map physical memory into the kernel virtual address space, a total of
 * max_low_pfn pages, by creating page tables starting at PAGE_OFFSET.
 * The page tables are allocated from resume-safe pages.
 */
static int resume_physical_mapping_init(pgd_t *pgd_base)
{
	unsigned long pfn;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int pgd_idx, pmd_idx;

	pgd_idx = pgd_index(PAGE_OFFSET);
	pgd = pgd_base + pgd_idx;
	pfn = 0;

	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
		pmd = resume_one_md_table_init(pgd);
		if (!pmd)
			return -ENOMEM;

		if (pfn >= max_low_pfn)
			continue;

		for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) {
			pte_t *max_pte;

			if (pfn >= max_low_pfn)
				break;

			/* Map with normal page tables.
			 * NOTE: We can mark everything as executable here
			 */
			pte = resume_one_page_table_init(pmd);
			if (!pte)
				return -ENOMEM;

			max_pte = pte + PTRS_PER_PTE;
			for (; pte < max_pte; pte++, pfn++) {
				if (pfn >= max_low_pfn)
					break;

				set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
			}
		}
	}

	return 0;
}

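/*
 * No first-level setup is needed on UniCore32: the kernel linear mapping
 * entries are filled in directly by resume_physical_mapping_init(), so
 * this is kept only as an empty hook.
 */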
static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
{
}

int swsusp_arch_resume(void)
{
	int error;

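	/* Allocate a resume-safe page for the temporary page directory. */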
	resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
	if (!resume_pg_dir)
		return -ENOMEM;

	resume_init_first_level_page_table(resume_pg_dir);
	error = resume_physical_mapping_init(resume_pg_dir);
	if (error)
		return error;

	/* We have got enough memory and from now on we cannot recover */
	restore_image(resume_pg_dir, restore_pblist);
	return 0;
}

/*
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	return (pfn >= begin_pfn) && (pfn < end_pfn);
}

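/*
 * Nothing to do here: the CPU register state is handled by the low-level
 * swsusp code (see swsusp_arch_regs_cpu0 above).
 */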
void save_processor_state(void)
{
}

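/* The page tables have just been restored; flush stale TLB entries. */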
void restore_processor_state(void)
{
	local_flush_tlb_all();
}