linux/kernel/cpu/idle.c
/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

static int __read_mostly cpu_idle_force_poll;

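/*
 * cpu_idle_poll_ctrl - enable/disable forced idle polling
 *
 * Keeps a nesting count: every enable call must be paired with a
 * disable call. While the count is non-zero the idle loop spins in
 * cpu_idle_poll() instead of entering an arch specific idle state.
 */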
void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}

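/*
 * "nohlt" on the kernel command line forces polling idle, "hlt"
 * restores the default behaviour.
 */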
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;
        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;
        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

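/*
 * Polling idle loop: tell RCU we are idle, re-enable interrupts and
 * spin with cpu_relax() until a reschedule is pending.
 */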
static inline int cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();
        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
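/*
 * Default arch_cpu_idle(): no arch specific idle instruction is
 * available, so force the idle loop to poll from here on.
 */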
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/*
 * Generic idle loop implementation
 */
static void cpu_idle_loop(void)
{
        while (1) {
                tick_nohz_idle_enter();

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                arch_cpu_idle_dead();

                        local_irq_disable();
                        arch_cpu_idle_enter();

                        /*
                         * In poll mode we re-enable interrupts and spin.
                         *
                         * Also, if the wakeup-from-idle path detected that
                         * the tick broadcast device expired for us, we don't
                         * want to go into deep idle, as we know the IPI is
                         * going to arrive right away.
                         */
                        if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
                                cpu_idle_poll();
                        } else {
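                                /*
                                 * Clear the polling flag so that a remote
                                 * wakeup sends a reschedule IPI instead of
                                 * relying on us noticing TIF_NEED_RESCHED,
                                 * then re-check need_resched() to close the
                                 * race against a wakeup that happened in
                                 * between.
                                 */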
                                current_clr_polling();
                                if (!need_resched()) {
                                        stop_critical_timings();
                                        rcu_idle_enter();
                                        arch_cpu_idle();
                                        WARN_ON_ONCE(irqs_disabled());
                                        rcu_idle_exit();
                                        start_critical_timings();
                                } else {
                                        local_irq_enable();
                                }
                                current_set_polling();
                        }
                        arch_cpu_idle_exit();
                }
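                /*
                 * A reschedule is pending: restart the periodic tick
                 * and let the scheduler pick the next task to run.
                 */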
                tick_nohz_idle_exit();
                schedule_preempt_disabled();
        }
}

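/*
 * cpu_startup_entry - common entry point into the idle loop
 *
 * Called from the boot and secondary CPU bringup paths. Marks the
 * current task as polling, runs the arch prepare hook and then enters
 * cpu_idle_loop(), which never returns.
 */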
void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (arm and sh have never invoked the canary
         * init for the non-boot CPUs!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        current_set_polling();
        arch_cpu_idle_prepare();
        cpu_idle_loop();
}