/*
 * linux/arch/arm/kernel/suspend.c
 */
   1#include <linux/init.h>
   2
   3#include <asm/idmap.h>
   4#include <asm/pgalloc.h>
   5#include <asm/pgtable.h>
   6#include <asm/memory.h>
   7#include <asm/suspend.h>
   8#include <asm/tlbflush.h>
   9
  10extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
  11extern void cpu_resume_mmu(void);
  12
  13/*
  14 * This is called by __cpu_suspend() to save the state, and do whatever
  15 * flushing is required to ensure that when the CPU goes to sleep we have
  16 * the necessary data available when the caches are not searched.
  17 */
  18void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
  19{
  20        u32 *ctx = ptr;
  21
  22        *save_ptr = virt_to_phys(ptr);
  23
  24        /* This must correspond to the LDM in cpu_resume() assembly */
  25        *ptr++ = virt_to_phys(idmap_pgd);
  26        *ptr++ = sp;
  27        *ptr++ = virt_to_phys(cpu_do_resume);
  28
  29        cpu_do_suspend(ptr);
  30
  31        flush_cache_louis();
  32
  33        /*
  34         * flush_cache_louis does not guarantee that
  35         * save_ptr and ptr are cleaned to main memory,
  36         * just up to the Level of Unification Inner Shareable.
  37         * Since the context pointer and context itself
  38         * are to be retrieved with the MMU off that
  39         * data must be cleaned from all cache levels
  40         * to main memory using "area" cache primitives.
  41        */
  42        __cpuc_flush_dcache_area(ctx, ptrsz);
  43        __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
  44
  45        outer_clean_range(*save_ptr, *save_ptr + ptrsz);
  46        outer_clean_range(virt_to_phys(save_ptr),
  47                          virt_to_phys(save_ptr) + sizeof(*save_ptr));
  48}
  49
  50/*
  51 * Hide the first two arguments to __cpu_suspend - these are an implementation
  52 * detail which platform code shouldn't have to know about.
  53 */
  54int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
  55{
  56        struct mm_struct *mm = current->active_mm;
  57        int ret;
  58
  59        if (!idmap_pgd)
  60                return -EINVAL;
  61
  62        /*
  63         * Provide a temporary page table with an identity mapping for
  64         * the MMU-enable code, required for resuming.  On successful
  65         * resume (indicated by a zero return code), we need to switch
  66         * back to the correct page tables.
  67         */
  68        ret = __cpu_suspend(arg, fn);
  69        if (ret == 0) {
  70                cpu_switch_mm(mm->pgd, mm);
  71                local_flush_bp_all();
  72                local_flush_tlb_all();
  73        }
  74
  75        return ret;
  76}
  77