linux/drivers/cpufreq/scmi-cpufreq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

struct scmi_data {
        int domain_id;
        int nr_opp;
        struct device *cpu_dev;
        cpumask_var_t opp_shared_cpus;
};

static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;

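/* Get the current frequency of the CPU's perf domain from firmware, in kHz */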
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
        struct scmi_data *priv = policy->driver_data;
        unsigned long rate;
        int ret;

        ret = perf_ops->freq_get(ph, priv->domain_id, &rate, false);
        if (ret)
                return 0;
        return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously, and completion can be notified if the corresponding
 * SCMI firmware events are subscribed to.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
        struct scmi_data *priv = policy->driver_data;
        u64 freq = policy->freq_table[index].frequency;

        return perf_ops->freq_set(ph, priv->domain_id, freq * 1000, false);
}

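/*
 * Fast switching issues the level-set request in polling mode so it can
 * be used from scheduler context; return the requested frequency on
 * success, 0 on failure.
 */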
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                             unsigned int target_freq)
{
        struct scmi_data *priv = policy->driver_data;

        if (!perf_ops->freq_set(ph, priv->domain_id,
                                target_freq * 1000, true))
                return target_freq;

        return 0;
}

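/*
 * Set in the cpumask every other CPU that belongs to the same SCMI
 * performance domain as cpu_dev, i.e. CPUs sharing performance controls.
 */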
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
        int cpu, domain, tdomain;
        struct device *tcpu_dev;

        domain = perf_ops->device_domain_id(cpu_dev);
        if (domain < 0)
                return domain;

        for_each_possible_cpu(cpu) {
                if (cpu == cpu_dev->id)
                        continue;

                tcpu_dev = get_cpu_device(cpu);
                if (!tcpu_dev)
                        continue;

                tdomain = perf_ops->device_domain_id(tcpu_dev);
                if (tdomain == domain)
                        cpumask_set_cpu(cpu, cpumask);
        }

        return 0;
}

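/*
 * Energy Model callback: query the firmware's estimated power at the
 * given frequency. The frequency is passed back out as it may be
 * adjusted to match an actual performance level.
 */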
static int __maybe_unused
scmi_get_cpu_power(unsigned long *power, unsigned long *KHz,
                   struct device *cpu_dev)
{
        unsigned long Hz;
        int ret, domain;

        domain = perf_ops->device_domain_id(cpu_dev);
        if (domain < 0)
                return domain;

        /* Get the power cost of the performance domain. */
        Hz = *KHz * 1000;
        ret = perf_ops->est_power_get(ph, domain, &Hz, power);
        if (ret)
                return ret;

        /* The EM framework specifies the frequency in KHz. */
        *KHz = Hz / 1000;

        return 0;
}

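/*
 * Set up the policy: discover CPUs sharing performance controls, add the
 * firmware OPPs to the device unless another CPU in the same OPP-sharing
 * set has already done so, and build the cpufreq frequency table.
 */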
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret, nr_opp;
        unsigned int latency;
        struct device *cpu_dev;
        struct scmi_data *priv;
        struct cpufreq_frequency_table *freq_table;

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", policy->cpu);
                return -ENODEV;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (!zalloc_cpumask_var(&priv->opp_shared_cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_free_priv;
        }

        /* Obtain CPUs that share SCMI performance controls */
        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                dev_warn(cpu_dev, "failed to get sharing cpumask\n");
                goto out_free_cpumask;
        }

        /*
         * Obtain CPUs that share performance levels.
         * The OPP 'sharing cpus' info may come from DT through an empty opp
         * table and opp-shared.
         */
        ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
        if (ret || !cpumask_weight(priv->opp_shared_cpus)) {
                /*
                 * Either opp-table is not set or no opp-shared was found.
                 * Use the CPU mask from SCMI to designate CPUs sharing an OPP
                 * table.
                 */
                cpumask_copy(priv->opp_shared_cpus, policy->cpus);
        }

        /*
         * A previous CPU may have marked OPPs as shared for a few CPUs, based
         * on what OPP core provided. If the current CPU is part of those few,
         * then there is no need to add OPPs again.
         */
        nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
        if (nr_opp <= 0) {
                ret = perf_ops->device_opps_add(ph, cpu_dev);
                if (ret) {
                        dev_warn(cpu_dev, "failed to add opps to the device\n");
                        goto out_free_cpumask;
                }

                nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
                if (nr_opp <= 0) {
                        dev_err(cpu_dev, "%s: No OPPs for this device: %d\n",
                                __func__, nr_opp);

                        ret = -ENODEV;
                        goto out_free_opp;
                }

                ret = dev_pm_opp_set_sharing_cpus(cpu_dev, priv->opp_shared_cpus);
                if (ret) {
                        dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                                __func__, ret);

                        goto out_free_opp;
                }

                priv->nr_opp = nr_opp;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_opp;
        }

        priv->cpu_dev = cpu_dev;
        priv->domain_id = perf_ops->device_domain_id(cpu_dev);

        policy->driver_data = priv;
        policy->freq_table = freq_table;

        /* SCMI allows DVFS request for any domain from any CPU */
        policy->dvfs_possible_from_any_cpu = true;

        latency = perf_ops->transition_latency_get(ph, cpu_dev);
        if (!latency)
                latency = CPUFREQ_ETERNAL;

        policy->cpuinfo.transition_latency = latency;

        policy->fast_switch_possible =
                perf_ops->fast_switch_possible(ph, cpu_dev);

        return 0;

out_free_opp:
        dev_pm_opp_remove_all_dynamic(cpu_dev);

out_free_cpumask:
        free_cpumask_var(priv->opp_shared_cpus);

out_free_priv:
        kfree(priv);

        return ret;
}

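/* Tear down everything scmi_cpufreq_init() set up for this policy */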
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct scmi_data *priv = policy->driver_data;

        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
        free_cpumask_var(priv->opp_shared_cpus);
        kfree(priv);

        return 0;
}

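/*
 * Register the policy's CPUs with the Energy Model, using
 * scmi_get_cpu_power() as the power estimation callback.
 */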
static void scmi_cpufreq_register_em(struct cpufreq_policy *policy)
{
        struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);
        bool power_scale_mw = perf_ops->power_scale_mw_get(ph);
        struct scmi_data *priv = policy->driver_data;

        /*
         * This callback will be called for each policy, but we don't need to
         * register with EM every time. Despite not being part of the same
         * policy, some CPUs may still share their perf-domains, and a CPU from
         * another policy may already have registered with EM on behalf of CPUs
         * of this policy.
         */
        if (!priv->nr_opp)
                return;

        em_dev_register_perf_domain(get_cpu_device(policy->cpu), priv->nr_opp,
                                    &em_cb, priv->opp_shared_cpus,
                                    power_scale_mw);
}

static struct cpufreq_driver scmi_cpufreq_driver = {
        .name   = "scmi",
        .flags  = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                  CPUFREQ_IS_COOLING_DEV,
        .verify = cpufreq_generic_frequency_table_verify,
        .attr   = cpufreq_generic_attr,
        .target_index   = scmi_cpufreq_set_target,
        .fast_switch    = scmi_cpufreq_fast_switch,
        .get    = scmi_cpufreq_get_rate,
        .init   = scmi_cpufreq_init,
        .exit   = scmi_cpufreq_exit,
        .register_em    = scmi_cpufreq_register_em,
};

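/* Acquire the SCMI performance protocol and register the cpufreq driver */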
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
        int ret;
        struct device *dev = &sdev->dev;
        const struct scmi_handle *handle;

        handle = sdev->handle;

        if (!handle)
                return -ENODEV;

        perf_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_PERF, &ph);
        if (IS_ERR(perf_ops))
                return PTR_ERR(perf_ops);

#ifdef CONFIG_COMMON_CLK
        /* dummy clock provider as needed by OPP if clocks property is used */
        if (of_find_property(dev->of_node, "#clock-cells", NULL))
                devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, NULL);
#endif

        ret = cpufreq_register_driver(&scmi_cpufreq_driver);
        if (ret) {
                dev_err(dev, "%s: registering cpufreq failed, err: %d\n",
                        __func__, ret);
        }

        return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
        cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
        { SCMI_PROTOCOL_PERF, "cpufreq" },
        { },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
        .name           = "scmi-cpufreq",
        .probe          = scmi_cpufreq_probe,
        .remove         = scmi_cpufreq_remove,
        .id_table       = scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");