linux/arch/arm/mach-omap2/cpuidle44xx.c
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

#include "common.h"
#include "pm.h"
#include "prm.h"
#include "clockdomain.h"

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;
	u32 mpu_logic_state;
	u32 mpu_state;
};

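/*
 * One entry per C-state exposed by omap4_idle_driver below: C1 (MPUSS ON),
 * C2 (MPUSS CSWR) and C3 (MPUSS OSWR), indexed by the cpuidle state index.
 */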
static struct idle_statedata omap4_idle_data[] = {
	{
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};

static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
static struct clockdomain *cpu_clkdm[NR_CPUS];

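/*
 * Book-keeping for the coupled idle path: abort_barrier synchronizes the
 * two CPUs on the exit/abort path, and cpu_done[] flags per-CPU completion
 * of a low power entry attempt.
 */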
static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];

/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of the state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the index of the low power state entered.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	omap_do_wfi();
	return index;
}

static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is in OFF state.
	 * This is necessary to honour the hardware recommendation
	 * of triggering all possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode.  Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;
		}
	}

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if ((cx->mpu_state == PWRDM_POWER_RET) &&
			(cx->mpu_logic_state == PWRDM_POWER_OFF))
				cpu_cluster_pm_enter();
	}

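	/*
	 * Program this CPU's targeted low power state and enter it;
	 * execution resumes here on wakeup.
	 */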
	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wake up CPU1 only if it is not offline */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		clkdm_wakeup(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if ((cx->mpu_state == PWRDM_POWER_RET) &&
		(cx->mpu_logic_state == PWRDM_POWER_OFF))
		cpu_cluster_pm_exit();

fail:
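	/*
	 * Both CPUs rendezvous here before the per-CPU done flags are
	 * cleared for the next idle attempt.
	 */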
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}

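/*
 * Exit latencies and target residencies below are in microseconds, per the
 * cpuidle framework convention.
 */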
static struct cpuidle_driver omap4_idle_driver = {
	.name				= "omap4_idle",
	.owner				= THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.flags = CPUIDLE_FLAG_TIME_VALID,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED |
				 CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};

/* Public functions */

/**
 * omap4_idle_init - Init routine for OMAP4+ idle
 *
 * Registers the OMAP4+ specific cpuidle driver with the cpuidle
 * framework, using the valid set of states.
 */
int __init omap4_idle_init(void)
{
	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
	cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
	if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
		return -ENODEV;

	cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
	cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
	if (!cpu_clkdm[0] || !cpu_clkdm[1])
		return -ENODEV;

	return cpuidle_register(&omap4_idle_driver, cpu_online_mask);
}