linux/drivers/cpufreq/cpufreq-dt.c
/*
 * Copyright (C) 2012 Freescale Semiconductor, Inc.
 *
 * Copyright (C) 2014 Linaro.
 * Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/thermal.h>

#include "cpufreq-dt.h"

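/*
 * Per-policy private data, stored in policy->driver_data: the OPP table
 * reference (only set when a regulator supply is managed through the OPP
 * layer), the CPU device, the cooling device registered from ->ready(),
 * and the regulator supply name, if any.
 */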
struct private_data {
        struct opp_table *opp_table;
        struct device *cpu_dev;
        struct thermal_cooling_device *cdev;
        const char *reg_name;
};

static struct freq_attr *cpufreq_dt_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,   /* Extra space for boost-attr if required */
        NULL,
};

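/*
 * Ask the OPP layer to switch the clock (and supply, if any) to the
 * frequency at @index, then report the new frequency scale to the
 * architecture code.
 */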
static int set_target(struct cpufreq_policy *policy, unsigned int index)
{
        struct private_data *priv = policy->driver_data;
        unsigned long freq = policy->freq_table[index].frequency;
        int ret;

        ret = dev_pm_opp_set_rate(priv->cpu_dev, freq * 1000);

        if (!ret) {
                arch_set_freq_scale(policy->related_cpus, freq,
                                    policy->cpuinfo.max_freq);
        }

        return ret;
}

/*
 * An earlier version of the opp-v1 bindings used to name the regulator
 * "cpu0-supply"; we still need to handle that for backwards compatibility.
 */
static const char *find_supply_name(struct device *dev)
{
        struct device_node *np;
        struct property *pp;
        int cpu = dev->id;
        const char *name = NULL;

        np = of_node_get(dev->of_node);

        /* This must be valid for sure */
        if (WARN_ON(!np))
                return NULL;

        /* Try "cpu0" for older DTs */
        if (!cpu) {
                pp = of_find_property(np, "cpu0-supply", NULL);
                if (pp) {
                        name = "cpu0";
                        goto node_put;
                }
        }

        pp = of_find_property(np, "cpu-supply", NULL);
        if (pp) {
                name = "cpu";
                goto node_put;
        }

        dev_dbg(dev, "no regulator for cpu%d\n", cpu);
node_put:
        of_node_put(np);
        return name;
}

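/*
 * Check that cpu0's clock and (optional) regulator supply are already
 * registered, so that probe can be deferred until they are available.
 */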
static int resources_available(void)
{
        struct device *cpu_dev;
        struct regulator *cpu_reg;
        struct clk *cpu_clk;
        int ret = 0;
        const char *name;

        cpu_dev = get_cpu_device(0);
        if (!cpu_dev) {
                pr_err("failed to get cpu0 device\n");
                return -ENODEV;
        }

        cpu_clk = clk_get(cpu_dev, NULL);
        ret = PTR_ERR_OR_ZERO(cpu_clk);
        if (ret) {
                /*
                 * If the cpu's clk node is present, but the clock is not yet
                 * registered, we should try deferring probe.
                 */
                if (ret == -EPROBE_DEFER)
                        dev_dbg(cpu_dev, "clock not ready, retry\n");
                else
                        dev_err(cpu_dev, "failed to get clock: %d\n", ret);

                return ret;
        }

        clk_put(cpu_clk);

        name = find_supply_name(cpu_dev);
        /* Platform doesn't require regulator */
        if (!name)
                return 0;

        cpu_reg = regulator_get_optional(cpu_dev, name);
        ret = PTR_ERR_OR_ZERO(cpu_reg);
        if (ret) {
                /*
                 * If the cpu's regulator supply node is present, but the
                 * regulator is not yet registered, we should try deferring
                 * probe.
                 */
                if (ret == -EPROBE_DEFER)
                        dev_dbg(cpu_dev, "cpu0 regulator not ready, retry\n");
                else
                        dev_dbg(cpu_dev, "no regulator for cpu0: %d\n", ret);

                return ret;
        }

        regulator_put(cpu_reg);
        return 0;
}

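/*
 * Per-policy initialization: look up the CPU's clock and supply, populate
 * the OPP tables for all CPUs sharing the policy, build the cpufreq
 * frequency table and fill in the policy fields.
 */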
static int cpufreq_init(struct cpufreq_policy *policy)
{
        struct cpufreq_frequency_table *freq_table;
        struct opp_table *opp_table = NULL;
        struct private_data *priv;
        struct device *cpu_dev;
        struct clk *cpu_clk;
        unsigned int transition_latency;
        bool fallback = false;
        const char *name;
        int ret;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", policy->cpu);
                return -ENODEV;
        }

        cpu_clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(cpu_clk)) {
                ret = PTR_ERR(cpu_clk);
                dev_err(cpu_dev, "%s: failed to get clk: %d\n", __func__, ret);
                return ret;
        }

        /* Get OPP-sharing information from "operating-points-v2" bindings */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                if (ret != -ENOENT)
                        goto out_put_clk;

                /*
                 * operating-points-v2 is not supported; fall back to the old
                 * method of finding shared OPPs for backward compatibility if
                 * the platform hasn't set sharing CPUs.
                 */
                if (dev_pm_opp_get_sharing_cpus(cpu_dev, policy->cpus))
                        fallback = true;
        }

        /*
         * The OPP layer will be taking care of regulators now, but it needs
         * to know the name of the regulator first.
         */
        name = find_supply_name(cpu_dev);
        if (name) {
                opp_table = dev_pm_opp_set_regulators(cpu_dev, &name, 1);
                if (IS_ERR(opp_table)) {
                        ret = PTR_ERR(opp_table);
                        dev_err(cpu_dev, "Failed to set regulator for cpu%d: %d\n",
                                policy->cpu, ret);
                        goto out_put_clk;
                }
        }

        /*
         * Initialize OPP tables for all policy->cpus. They will be shared by
         * all CPUs which have marked their OPPs as shared via the bindings.
         *
         * For platforms not using operating-points-v2 bindings, we do this
         * before updating policy->cpus. Otherwise, we will end up creating
         * duplicate OPPs for policy->cpus.
         *
         * OPPs might be populated at runtime, so don't check for errors here.
         */
        dev_pm_opp_of_cpumask_add_table(policy->cpus);

        /*
         * But we need an OPP table to function, so if it is not there, let's
         * give the platform code a chance to provide it for us.
         */
        ret = dev_pm_opp_get_opp_count(cpu_dev);
        if (ret <= 0) {
                dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }

        if (fallback) {
                cpumask_setall(policy->cpus);

                /*
                 * OPP tables are initialized only for policy->cpu, so do it
                 * for the others as well.
                 */
                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
                if (ret)
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_opp;
        }

        priv->reg_name = name;
        priv->opp_table = opp_table;

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        priv->cpu_dev = cpu_dev;
        policy->driver_data = priv;
        policy->clk = cpu_clk;
        policy->freq_table = freq_table;

        policy->suspend_freq = dev_pm_opp_get_suspend_opp_freq(cpu_dev) / 1000;

        /* Support turbo/boost mode */
        if (policy_has_boost_freq(policy)) {
                /* This gets disabled by core on driver unregister */
                ret = cpufreq_enable_boost_support();
                if (ret)
                        goto out_free_cpufreq_table;
                cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
        }

        transition_latency = dev_pm_opp_get_max_transition_latency(cpu_dev);
        if (!transition_latency)
                transition_latency = CPUFREQ_ETERNAL;

        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;

        return 0;

out_free_cpufreq_table:
        dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
out_free_priv:
        kfree(priv);
out_free_opp:
        dev_pm_opp_of_cpumask_remove_table(policy->cpus);
        if (name)
                dev_pm_opp_put_regulators(opp_table);
out_put_clk:
        clk_put(cpu_clk);

        return ret;
}

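/*
 * Undo cpufreq_init(): unregister the cooling device and release the
 * frequency table, OPP tables, OPP regulator and clock references.
 */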
static int cpufreq_exit(struct cpufreq_policy *policy)
{
        struct private_data *priv = policy->driver_data;

        cpufreq_cooling_unregister(priv->cdev);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_of_cpumask_remove_table(policy->related_cpus);
        if (priv->reg_name)
                dev_pm_opp_put_regulators(priv->opp_table);

        clk_put(policy->clk);
        kfree(priv);

        return 0;
}

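/*
 * Called by the cpufreq core once the policy is fully initialized; register
 * the policy's CPUs as a cooling device with the thermal framework.
 */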
static void cpufreq_ready(struct cpufreq_policy *policy)
{
        struct private_data *priv = policy->driver_data;

        priv->cdev = of_cpufreq_cooling_register(policy);
}

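/* cpufreq driver callbacks for the generic DT based driver */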
static struct cpufreq_driver dt_cpufreq_driver = {
        .flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .verify = cpufreq_generic_frequency_table_verify,
        .target_index = set_target,
        .get = cpufreq_generic_get,
        .init = cpufreq_init,
        .exit = cpufreq_exit,
        .ready = cpufreq_ready,
        .name = "cpufreq-dt",
        .attr = cpufreq_dt_attr,
        .suspend = cpufreq_generic_suspend,
};

static int dt_cpufreq_probe(struct platform_device *pdev)
{
        struct cpufreq_dt_platform_data *data = dev_get_platdata(&pdev->dev);
        int ret;

        /*
         * All per-cluster (CPUs sharing clock/voltages) initialization is done
         * from ->init(). In probe(), we just need to make sure that clk and
         * regulators are available. Else defer probe and retry.
         *
         * FIXME: Is checking this only for CPU0 sufficient?
         */
        ret = resources_available();
        if (ret)
                return ret;

        if (data) {
                if (data->have_governor_per_policy)
                        dt_cpufreq_driver.flags |= CPUFREQ_HAVE_GOVERNOR_PER_POLICY;

                dt_cpufreq_driver.resume = data->resume;
                if (data->suspend)
                        dt_cpufreq_driver.suspend = data->suspend;
        }

        ret = cpufreq_register_driver(&dt_cpufreq_driver);
        if (ret)
                dev_err(&pdev->dev, "failed to register driver: %d\n", ret);

        return ret;
}

static int dt_cpufreq_remove(struct platform_device *pdev)
{
        cpufreq_unregister_driver(&dt_cpufreq_driver);
        return 0;
}

static struct platform_driver dt_cpufreq_platdrv = {
        .driver = {
                .name   = "cpufreq-dt",
        },
        .probe          = dt_cpufreq_probe,
        .remove         = dt_cpufreq_remove,
};
module_platform_driver(dt_cpufreq_platdrv);

MODULE_ALIAS("platform:cpufreq-dt");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
MODULE_DESCRIPTION("Generic cpufreq driver");
MODULE_LICENSE("GPL");