linux/arch/arm/mach-omap2/cpuidle44xx.c
/*
 * OMAP4 CPU idle Routines
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/clockchips.h>

#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

/* Machine specific information */
struct omap4_idle_statedata {
        u32 cpu_state;
        u32 mpu_logic_state;
        u32 mpu_state;
};

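/*
 * The entries below correspond, in order, to the C1 (MPUSS ON),
 * C2 (MPUSS CSWR) and C3 (MPUSS OSWR) states of omap4_idle_driver;
 * omap4_enter_idle_coupled() indexes this array with the cpuidle
 * state index handed in by the governor.
 */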
static struct omap4_idle_statedata omap4_idle_data[] = {
        {
                .cpu_state = PWRDM_POWER_ON,
                .mpu_state = PWRDM_POWER_ON,
                .mpu_logic_state = PWRDM_POWER_RET,
        },
        {
                .cpu_state = PWRDM_POWER_OFF,
                .mpu_state = PWRDM_POWER_RET,
                .mpu_logic_state = PWRDM_POWER_RET,
        },
        {
                .cpu_state = PWRDM_POWER_OFF,
                .mpu_state = PWRDM_POWER_RET,
                .mpu_logic_state = PWRDM_POWER_OFF,
        },
};

static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

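/*
 * cpu_done[] flags that a CPU has finished (or aborted) its low power
 * entry; abort_barrier is the coupled-idle barrier both CPUs cross in
 * the fail path before re-enabling FIQs and returning to the governor.
 */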
static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];

/* Private functions */

/**
 * omap4_enter_idle_[simple/coupled] - OMAP4 cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap4_enter_idle_simple(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        local_fiq_disable();
        omap_do_wfi();
        local_fiq_enable();

        return index;
}

static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
{
        struct omap4_idle_statedata *cx = &omap4_idle_data[index];
        int cpu_id = smp_processor_id();

        local_fiq_disable();

        /*
         * CPU0 has to wait and stay ON until CPU1 is in OFF state.
         * This is necessary to honour the hardware recommendation
         * of triggering all the possible low power modes only once
         * CPU1 is out of coherency and in OFF mode.
         */
        if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
                while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
                        cpu_relax();

                        /*
                         * CPU1 could have already entered & exited idle
                         * without hitting off because of a wakeup
                         * or a failed attempt to hit off mode.  Check for
                         * that here, otherwise we could spin forever
                         * waiting for CPU1 off.
                         */
                        if (cpu_done[1])
                                goto fail;
                }
        }

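        /*
         * Local timers stop in the low power states, so let the
         * broadcast clockevent take over timer duty for this CPU.
         */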
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);

        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
         */
        cpu_pm_enter();

        if (dev->cpu == 0) {
                pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
                omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

                /*
                 * Call idle CPU cluster PM enter notifier chain
                 * to save GIC and wakeupgen context.
                 */
                if ((cx->mpu_state == PWRDM_POWER_RET) &&
                        (cx->mpu_logic_state == PWRDM_POWER_OFF))
                                cpu_cluster_pm_enter();
        }

        omap4_enter_lowpower(dev->cpu, cx->cpu_state);
        cpu_done[dev->cpu] = true;

        /* Wakeup CPU1 only if it is not offlined */
        if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
                clkdm_wakeup(cpu_clkdm[1]);
                clkdm_allow_idle(cpu_clkdm[1]);
        }

        /*
         * Call idle CPU PM exit notifier chain to restore
         * VFP and per CPU IRQ context.
         */
        cpu_pm_exit();

        /*
         * Call idle CPU cluster PM exit notifier chain
         * to restore GIC and wakeupgen context.
         */
        if (omap4_mpuss_read_prev_context_state())
                cpu_cluster_pm_exit();

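        /* Hand timer duty back from the broadcast clockevent to the local timer */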
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);

fail:
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
        cpu_done[dev->cpu] = false;

        local_fiq_enable();

        return index;
}

/*
 * For each cpu, set up the broadcast timer because the local timer
 * stops in the states above C1.
 */
static void omap_setup_broadcast_timer(void *arg)
{
        int cpu = smp_processor_id();
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

static DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);

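/*
 * exit_latency and target_residency below are in microseconds.  The
 * state ordering must match omap4_idle_data[], and safe_state_index
 * selects C1 as the state a CPU may use on its own when the coupled
 * states cannot be entered.
 */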
static struct cpuidle_driver omap4_idle_driver = {
        .name                           = "omap4_idle",
        .owner                          = THIS_MODULE,
        .en_core_tk_irqen               = 1,
        .states = {
                {
                        /* C1 - CPU0 ON + CPU1 ON + MPU ON */
                        .exit_latency = 2 + 2,
                        .target_residency = 5,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .enter = omap4_enter_idle_simple,
                        .name = "C1",
                        .desc = "MPUSS ON",
                },
                {
                        /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                        .exit_latency = 328 + 440,
                        .target_residency = 960,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap4_enter_idle_coupled,
                        .name = "C2",
                        .desc = "MPUSS CSWR",
                },
                {
                        /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                        .exit_latency = 460 + 518,
                        .target_residency = 1100,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .enter = omap4_enter_idle_coupled,
                        .name = "C3",
                        .desc = "MPUSS OSWR",
                },
        },
        .state_count = ARRAY_SIZE(omap4_idle_data),
        .safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver and its set of C-states
 * with the cpuidle framework.
 */
int __init omap4_idle_init(void)
{
        struct cpuidle_device *dev;
        unsigned int cpu_id = 0;

        mpu_pd = pwrdm_lookup("mpu_pwrdm");
        cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
        cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
        if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
                return -ENODEV;

        cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
        cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
        if (!cpu_clkdm[0] || !cpu_clkdm[1])
                return -ENODEV;

        /* Configure the broadcast timer on each cpu */
        on_each_cpu(omap_setup_broadcast_timer, NULL, 1);

        /* Register the driver once, then one cpuidle device per online CPU */
        if (cpuidle_register_driver(&omap4_idle_driver)) {
                pr_err("%s: CPUidle driver registration failed\n", __func__);
                return -EIO;
        }

        for_each_cpu(cpu_id, cpu_online_mask) {
                dev = &per_cpu(omap4_idle_dev, cpu_id);
                dev->cpu = cpu_id;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
                dev->coupled_cpus = *cpu_online_mask;
#endif
                if (cpuidle_register_device(dev)) {
                        pr_err("%s: CPUidle register failed\n", __func__);
                        return -EIO;
                }
        }

        return 0;
}