linux/arch/arm64/kernel/suspend.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2#include <linux/ftrace.h>
   3#include <linux/percpu.h>
   4#include <linux/slab.h>
   5#include <linux/uaccess.h>
   6#include <linux/pgtable.h>
   7#include <asm/alternative.h>
   8#include <asm/cacheflush.h>
   9#include <asm/cpufeature.h>
  10#include <asm/cpuidle.h>
  11#include <asm/daifflags.h>
  12#include <asm/debug-monitors.h>
  13#include <asm/exec.h>
  14#include <asm/mte.h>
  15#include <asm/memory.h>
  16#include <asm/mmu_context.h>
  17#include <asm/smp_plat.h>
  18#include <asm/suspend.h>
  19
/*
 * This is allocated by cpu_suspend_init(), and used to store a pointer to
 * the 'struct sleep_stack_data' that contains a particular CPU's state.
 * Indexed by the MPIDR hash; each slot holds a physical address consumed
 * by the early resume code.
 */
unsigned long *sleep_save_stash;
  25
  26/*
  27 * This hook is provided so that cpu_suspend code can restore HW
  28 * breakpoints as early as possible in the resume path, before reenabling
  29 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
  30 * time the notifier runs debug exceptions might have been enabled already,
  31 * with HW breakpoints registers content still in an unknown state.
  32 */
  33static int (*hw_breakpoint_restore)(unsigned int);
  34void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
  35{
  36        /* Prevent multiple restore hook initializations */
  37        if (WARN_ON(hw_breakpoint_restore))
  38                return;
  39        hw_breakpoint_restore = hw_bp_restore;
  40}
  41
/*
 * Per-CPU fixup run on the resume path, called from cpu_resume() glue
 * before normal kernel execution continues. The ordering of the steps
 * below is significant; 'notrace' because the function-tracing machinery
 * must not be entered this early after reset.
 */
void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/* Restore CnP bit in TTBR1_EL1 */
	if (system_supports_cnp())
		cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/*
	 * PSTATE was not saved over suspend/resume, re-enable any detected
	 * features that might not have been set correctly.
	 */
	__uaccess_enable_hw_pan();

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly reenabled
	 * by cpu_suspend()s local_daif_restore() call.
	 * The hook is registered via cpu_suspend_set_dbg_restorer().
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);

	/*
	 * On resume, firmware implementing dynamic mitigation will
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	spectre_v4_enable_mitigation(NULL);

	/* Restore additional feature-specific configuration */
	ptrauth_suspend_exit();
}
  81
/*
 * cpu_suspend()
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 *
 * The finisher is expected to power the CPU down and never return; on a
 * successful suspend, control comes back via cpu_resume() through
 * __cpu_suspend_enter()'s resume path and 0 is returned. If the finisher
 * does return, its (forced non-zero) error code is propagated instead.
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;
	struct arm_cpuidle_irq_context context;

	/* Report any MTE async fault before going to suspend */
	mte_suspend_enter();

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	flags = local_daif_save();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers) hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Switch to using DAIF.IF instead of PMR in order to reliably
	 * resume if we're using pseudo-NMIs.
	 */
	arm_cpuidle_save_irq_context(&context);

	/* Non-zero return means we are resuming after a failed suspend. */
	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);

		/*
		 * Never gets here, unless the suspend finisher fails.
		 * Successful cpu_suspend() should return from cpu_resume(),
		 * returning through this code path is considered an error
		 * If the return value is set to 0 force ret = -EOPNOTSUPP
		 * to make sure a proper error condition is propagated
		 */
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		/* Resume path: RCU is idle here, so wrap the exit fixups. */
		RCU_NONIDLE(__cpu_suspend_exit());
	}

	arm_cpuidle_restore_irq_context(&context);

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have been already
	 * restored, so from this point onwards, debugging is fully
	 * re-enabled if it was enabled when core started shutdown.
	 */
	local_daif_restore(flags);

	return ret;
}
 149
 150static int __init cpu_suspend_init(void)
 151{
 152        /* ctx_ptr is an array of physical addresses */
 153        sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
 154                                   GFP_KERNEL);
 155
 156        if (WARN_ON(!sleep_save_stash))
 157                return -ENOMEM;
 158
 159        return 0;
 160}
 161early_initcall(cpu_suspend_init);
 162