linux/arch/sh/kernel/idle.c
/*
 * The idle loop for all SuperH platforms.
 *
 *  Copyright (C) 2002 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/cpuidle.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <linux/atomic.h>
#include <asm/smp.h>

void (*pm_idle)(void);

static int hlt_counter;

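/*
 * "hlt" and "nohlt" on the kernel command line control whether the idle
 * loop may put the CPU to sleep or has to busy-poll instead.
 */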
static int __init nohlt_setup(char *__unused)
{
        hlt_counter = 1;
        return 1;
}
__setup("nohlt", nohlt_setup);

static int __init hlt_setup(char *__unused)
{
        hlt_counter = 0;
        return 1;
}
__setup("hlt", hlt_setup);

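/* True unless "nohlt" was passed on the command line. */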
static inline int hlt_works(void)
{
        return !hlt_counter;
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
}

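/*
 * Default idle: stop polling, raise the SR.BL bit to avoid racing with a
 * wakeup while need_resched() is re-checked, then put the CPU to sleep
 * with cpu_sleep().  Falls back to poll_idle() when "nohlt" is in effect.
 */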
void default_idle(void)
{
        if (hlt_works()) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();

                set_bl_bit();
                if (!need_resched()) {
                        local_irq_enable();
                        cpu_sleep();
                } else
                        local_irq_enable();

                set_thread_flag(TIF_POLLING_NRFLAG);
                clear_bl_bit();
        } else
                poll_idle();
}

/*
 * The idle thread. There's no useful work to be done, so just try to conserve
 * power and have a low exit latency (ie sit in a loop waiting for somebody to
 * say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        unsigned int cpu = smp_processor_id();

        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_stop_sched_tick(1);

                while (!need_resched()) {
                        check_pgt_cache();
                        rmb();

                        if (cpu_is_offline(cpu))
                                play_dead();

                        local_irq_disable();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();
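                        /*
                         * Enter through cpuidle if a driver has registered;
                         * otherwise fall back to the architecture pm_idle().
                         */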
                        if (cpuidle_idle_call())
                                pm_idle();
                        /*
                         * Sanity check to ensure that pm_idle() returns
                         * with IRQs enabled
                         */
                        WARN_ON(irqs_disabled());
                        start_critical_timings();
                }

                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

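/*
 * Boot-time selection of the idle routine: a platform-provided pm_idle
 * takes precedence, otherwise choose between default_idle() and poll_idle()
 * based on whether the sleep instruction may be used.
 */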
void __init select_idle_routine(void)
{
        /*
         * If a platform has set its own idle routine, leave it alone.
         */
        if (pm_idle)
                return;

        if (hlt_works())
                pm_idle = default_idle;
        else
                pm_idle = poll_idle;
}

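/* Dummy IPI target; cpu_idle_wait() only needs the interrupt to kick CPUs. */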
static void do_nothing(void *unused)
{
}

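/*
 * Park the calling CPU: mark it offline, mask interrupts and sleep forever.
 * Typically reached via an IPI when all other CPUs are being stopped.
 */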
void stop_this_cpu(void *unused)
{
        local_irq_disable();
        set_cpu_online(smp_processor_id(), false);

        for (;;)
                cpu_sleep();
}

/*
 * cpu_idle_wait - Used to ensure that all CPUs discard the old value of
 * pm_idle and pick up the new one. Required while changing the pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. The old
 * pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);