linux/drivers/cpufreq/scmi-cpufreq.c
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Power Interface (SCMI) based CPUFreq Interface driver
 *
 * Copyright (C) 2018 ARM Ltd.
 * Sudeep Holla <sudeep.holla@arm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/scmi_protocol.h>
#include <linux/types.h>

struct scmi_data {
        int domain_id;          /* SCMI performance domain ID for this policy */
        struct device *cpu_dev; /* device of the CPU owning the policy */
};

/* SCMI handle cached at probe time and used by all cpufreq callbacks */
static const struct scmi_handle *handle;

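/*
 * Read the current frequency of the CPU's performance domain from the
 * SCMI firmware; the firmware reports Hz, while cpufreq expects kHz.
 */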
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
        struct scmi_perf_ops *perf_ops = handle->perf_ops;
        struct scmi_data *priv = policy->driver_data;
        unsigned long rate;
        int ret;

        ret = perf_ops->freq_get(handle, priv->domain_id, &rate, false);
        if (ret)
                return 0;
        return rate / 1000;
}

/*
 * perf_ops->freq_set is not synchronous: the actual OPP change happens
 * asynchronously, and a notification can be delivered by the SCMI
 * firmware if such events have been subscribed to.
 */
static int
scmi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index)
{
        int ret;
        struct scmi_data *priv = policy->driver_data;
        struct scmi_perf_ops *perf_ops = handle->perf_ops;
        u64 freq = policy->freq_table[index].frequency;

        /* The frequency table holds kHz; the firmware expects Hz */
        ret = perf_ops->freq_set(handle, priv->domain_id, freq * 1000, false);
        if (!ret)
                arch_set_freq_scale(policy->related_cpus, freq,
                                    policy->cpuinfo.max_freq);
        return ret;
}

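/*
 * Fast switching runs in scheduler context with interrupts disabled, so
 * the request is issued in polling mode (poll == true) and never sleeps.
 */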
static unsigned int scmi_cpufreq_fast_switch(struct cpufreq_policy *policy,
                                             unsigned int target_freq)
{
        struct scmi_data *priv = policy->driver_data;
        struct scmi_perf_ops *perf_ops = handle->perf_ops;

        if (!perf_ops->freq_set(handle, priv->domain_id,
                                target_freq * 1000, true)) {
                arch_set_freq_scale(policy->related_cpus, target_freq,
                                    policy->cpuinfo.max_freq);
                return target_freq;
        }

        return 0;
}

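/*
 * Build the mask of CPUs sharing cpu_dev's SCMI performance domain, so
 * that they can all be grouped under a single cpufreq policy.
 */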
static int
scmi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
{
        int cpu, domain, tdomain;
        struct device *tcpu_dev;

        domain = handle->perf_ops->device_domain_id(cpu_dev);
        if (domain < 0)
                return domain;

        for_each_possible_cpu(cpu) {
                if (cpu == cpu_dev->id)
                        continue;

                tcpu_dev = get_cpu_device(cpu);
                if (!tcpu_dev)
                        continue;

                tdomain = handle->perf_ops->device_domain_id(tcpu_dev);
                if (tdomain == domain)
                        cpumask_set_cpu(cpu, cpumask);
        }

        return 0;
}

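/*
 * Energy Model callback: query the firmware for the estimated power cost
 * of the performance domain at a given frequency. The firmware rounds the
 * frequency to a real OPP, which is reported back to the EM core in kHz.
 */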
static int __maybe_unused
scmi_get_cpu_power(unsigned long *power, unsigned long *KHz, int cpu)
{
        struct device *cpu_dev = get_cpu_device(cpu);
        unsigned long Hz;
        int ret, domain;

        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", cpu);
                return -ENODEV;
        }

        domain = handle->perf_ops->device_domain_id(cpu_dev);
        if (domain < 0)
                return domain;

        /* Get the power cost of the performance domain. */
        Hz = *KHz * 1000;
        ret = handle->perf_ops->est_power_get(handle, domain, &Hz, power);
        if (ret)
                return ret;

        /* The EM framework specifies the frequency in KHz. */
        *KHz = Hz / 1000;

        return 0;
}

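/*
 * Initialise a cpufreq policy: populate the device OPPs from firmware,
 * mark them as shared across the performance domain, build the cpufreq
 * frequency table and register an energy model for the policy's CPUs.
 */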
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
        int ret, nr_opp;
        unsigned int latency;
        struct device *cpu_dev;
        struct scmi_data *priv;
        struct cpufreq_frequency_table *freq_table;
        struct em_data_callback em_cb = EM_DATA_CB(scmi_get_cpu_power);

        cpu_dev = get_cpu_device(policy->cpu);
        if (!cpu_dev) {
                pr_err("failed to get cpu%d device\n", policy->cpu);
                return -ENODEV;
        }

        ret = handle->perf_ops->device_opps_add(handle, cpu_dev);
        if (ret) {
                dev_warn(cpu_dev, "failed to add opps to the device\n");
                return ret;
        }

        ret = scmi_get_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                dev_warn(cpu_dev, "failed to get sharing cpumask\n");
                return ret;
        }

        ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus);
        if (ret) {
                dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
                        __func__, ret);
                return ret;
        }

        nr_opp = dev_pm_opp_get_opp_count(cpu_dev);
        if (nr_opp <= 0) {
                dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n");
                ret = -EPROBE_DEFER;
                goto out_free_opp;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_opp;
        }

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
                goto out_free_priv;
        }

        priv->cpu_dev = cpu_dev;
        priv->domain_id = handle->perf_ops->device_domain_id(cpu_dev);

        policy->driver_data = priv;
        policy->freq_table = freq_table;

        /* SCMI allows DVFS request for any domain from any CPU */
        policy->dvfs_possible_from_any_cpu = true;

        latency = handle->perf_ops->transition_latency_get(handle, cpu_dev);
        if (!latency)
                latency = CPUFREQ_ETERNAL;

        policy->cpuinfo.transition_latency = latency;

        policy->fast_switch_possible = true;

        em_register_perf_domain(policy->cpus, nr_opp, &em_cb);

        return 0;

out_free_priv:
        kfree(priv);
out_free_opp:
        dev_pm_opp_remove_all_dynamic(cpu_dev);

        return ret;
}

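/* Undo scmi_cpufreq_init(): free the frequency table, OPPs and priv data */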
static int scmi_cpufreq_exit(struct cpufreq_policy *policy)
{
        struct scmi_data *priv = policy->driver_data;

        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
        kfree(priv);

        return 0;
}

static struct cpufreq_driver scmi_cpufreq_driver = {
        .name   = "scmi",
        .flags  = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                  CPUFREQ_NEED_INITIAL_FREQ_CHECK |
                  CPUFREQ_IS_COOLING_DEV,
        .verify = cpufreq_generic_frequency_table_verify,
        .attr   = cpufreq_generic_attr,
        .target_index   = scmi_cpufreq_set_target,
        .fast_switch    = scmi_cpufreq_fast_switch,
        .get    = scmi_cpufreq_get_rate,
        .init   = scmi_cpufreq_init,
        .exit   = scmi_cpufreq_exit,
};

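/*
 * Probe the SCMI performance protocol device: cache the handle for use
 * by the cpufreq callbacks, then register the cpufreq driver.
 */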
static int scmi_cpufreq_probe(struct scmi_device *sdev)
{
        int ret;

        handle = sdev->handle;

        if (!handle || !handle->perf_ops)
                return -ENODEV;

        ret = cpufreq_register_driver(&scmi_cpufreq_driver);
        if (ret) {
                dev_err(&sdev->dev, "%s: registering cpufreq failed, err: %d\n",
                        __func__, ret);
        }

        return ret;
}

static void scmi_cpufreq_remove(struct scmi_device *sdev)
{
        cpufreq_unregister_driver(&scmi_cpufreq_driver);
}

static const struct scmi_device_id scmi_id_table[] = {
        { SCMI_PROTOCOL_PERF },
        { },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_cpufreq_drv = {
        .name           = "scmi-cpufreq",
        .probe          = scmi_cpufreq_probe,
        .remove         = scmi_cpufreq_remove,
        .id_table       = scmi_id_table,
};
module_scmi_driver(scmi_cpufreq_drv);

MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("ARM SCMI CPUFreq interface driver");
MODULE_LICENSE("GPL v2");