linux/drivers/cpufreq/cppc_cpufreq.c
/*
 * CPPC (Collaborative Processor Performance Control) driver for
 * interfacing with the CPUfreq layer and governors. See
 * cppc_acpi.c for CPPC specific methods.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */

#define pr_fmt(fmt)     "CPPC Cpufreq:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

/* Minimum struct length needed for the DMI processor entry we want */
#define DMI_ENTRY_PROCESSOR_MIN_LENGTH  48

/*
 * Offset in the DMI processor structure for the max frequency
 * (the SMBIOS Type 4 "Max Speed" field, a u16 in MHz)
 */
#define DMI_PROCESSOR_MAX_SPEED  0x14

/*
 * These structs contain information parsed from per-CPU ACPI _CPC
 * structures: e.g. for each CPU, the highest and lowest supported
 * performance capabilities, the desired performance level requested,
 * etc.
 */
static struct cppc_cpudata **all_cpu_data;

/* Callback function used to retrieve the max frequency from DMI */
static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
{
        const u8 *dmi_data = (const u8 *)dm;
        u16 *mhz = (u16 *)private;

        if (dm->type == DMI_ENTRY_PROCESSOR &&
            dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
                u16 val = (u16)get_unaligned((const u16 *)
                                (dmi_data + DMI_PROCESSOR_MAX_SPEED));
                *mhz = val > *mhz ? val : *mhz;
        }
}

/* Look up the max frequency in DMI */
static u64 cppc_get_dmi_max_khz(void)
{
        u16 mhz = 0;

        dmi_walk(cppc_find_dmi_mhz, &mhz);

        /*
         * Use a crude 1 MHz fallback value, just in case DMI reports
         * no actual max speed at all.
         */
        mhz = mhz ? mhz : 1;

        return (1000 * mhz);
}
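
/*
 * For example (hypothetical DMI contents): a Type 4 Processor record
 * advertising a Max Speed of 2800 MHz makes cppc_get_dmi_max_khz()
 * return 2800000 (kHz); with no usable record at all, the fallback
 * above yields 1000 kHz.
 */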

/*
 * If the CPPC lowest_freq and nominal_freq registers are exposed, then we
 * can use them to convert perf to freq and vice versa.
 *
 * If the perf/freq point lies between Nominal and Lowest, we can treat
 * (Lowest perf, Lowest freq) and (Nominal perf, Nominal freq) as 2D
 * coordinates of a line and interpolate between them.
 * For perf/freq above Nominal, we use the perf:freq ratio at Nominal for
 * the conversion.
 */
static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu,
                                        unsigned int perf)
{
        static u64 max_khz;
        struct cppc_perf_caps *caps = &cpu->perf_caps;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (perf >= caps->nominal_perf) {
                        mul = caps->nominal_freq;
                        div = caps->nominal_perf;
                } else {
                        mul = caps->nominal_freq - caps->lowest_freq;
                        div = caps->nominal_perf - caps->lowest_perf;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = max_khz;
                div = cpu->perf_caps.highest_perf;
        }
        return (u64)perf * mul / div;
}

static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu,
                                        unsigned int freq)
{
        static u64 max_khz;
        struct cppc_perf_caps *caps = &cpu->perf_caps;
        u64 mul, div;

        if (caps->lowest_freq && caps->nominal_freq) {
                if (freq >= caps->nominal_freq) {
                        mul = caps->nominal_perf;
                        div = caps->nominal_freq;
                } else {
                        mul = caps->lowest_perf;
                        div = caps->lowest_freq;
                }
        } else {
                if (!max_khz)
                        max_khz = cppc_get_dmi_max_khz();
                mul = cpu->perf_caps.highest_perf;
                div = max_khz;
        }

        return (u64)freq * mul / div;
}
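
/*
 * Worked example for the two conversions above, using hypothetical
 * capability values: lowest_perf = 10 at lowest_freq = 1000000 kHz,
 * nominal_perf = 20 at nominal_freq = 2000000 kHz.
 *
 *   perf_to_khz(25): above Nominal, so 25 * 2000000 / 20 = 2500000 kHz
 *   perf_to_khz(15): below Nominal, so 15 * (2000000 - 1000000) /
 *                    (20 - 10) = 1500000 kHz
 *   khz_to_perf(1500000): below Nominal, so 1500000 * 10 / 1000000 = 15
 *
 * Without the optional freq registers, both directions instead scale by
 * the DMI-derived max_khz : highest_perf ratio.
 */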

static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
                unsigned int target_freq,
                unsigned int relation)
{
        struct cppc_cpudata *cpu;
        struct cpufreq_freqs freqs;
        u32 desired_perf;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        desired_perf = cppc_cpufreq_khz_to_perf(cpu, target_freq);
        /* Return if it is exactly the same perf */
        if (desired_perf == cpu->perf_ctrls.desired_perf)
                return ret;

        cpu->perf_ctrls.desired_perf = desired_perf;
        freqs.old = policy->cur;
        freqs.new = target_freq;

        cpufreq_freq_transition_begin(policy, &freqs);
        ret = cppc_set_perf(cpu->cpu, &cpu->perf_ctrls);
        cpufreq_freq_transition_end(policy, &freqs, ret != 0);

        if (ret)
                pr_debug("Failed to set target on CPU:%d. ret:%d\n",
                                cpu->cpu, ret);

        return ret;
}
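
/*
 * Note on the begin/end pair above: cpufreq_freq_transition_begin() and
 * _end() bracket the synchronous frequency change for the cpufreq core
 * and its notifiers; passing a nonzero 'ret != 0' to _end() flags the
 * transition as failed, so the core does not adopt freqs.new as the
 * current frequency.
 */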

static int cppc_verify_policy(struct cpufreq_policy *policy)
{
        cpufreq_verify_within_cpu_limits(policy);
        return 0;
}

static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
        int cpu_num = policy->cpu;
        struct cppc_cpudata *cpu = all_cpu_data[cpu_num];
        int ret;

        cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf;

        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                                cpu->perf_caps.lowest_perf, cpu_num, ret);
}

/*
 * The PCC subspace describes the rate at which the platform can accept
 * commands on the shared PCC channel (including READs, which do not count
 * towards frequency transition requests), so ideally we need to use the
 * PCC values as a fallback if we don't have a platform-specific
 * transition_delay_us.
 */
#ifdef CONFIG_ARM64
#include <asm/cputype.h>

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        unsigned long implementor = read_cpuid_implementor();
        unsigned long part_num = read_cpuid_part_number();
        unsigned int delay_us = 0;

        switch (implementor) {
        case ARM_CPU_IMP_QCOM:
                switch (part_num) {
                case QCOM_CPU_PART_FALKOR_V1:
                case QCOM_CPU_PART_FALKOR:
                        delay_us = 10000;
                        break;
                default:
                        delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                        break;
                }
                break;
        default:
                delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
                break;
        }

        return delay_us;
}

#else

static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
{
        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
}
#endif
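
/*
 * For example (hypothetical latency): if cppc_get_transition_latency()
 * derives 30000000 ns from the PCC subspace, the transition delay becomes
 * 30000000 / NSEC_PER_USEC = 30000 us. Qualcomm Falkor parts are
 * special-cased above to a fixed 10000 us (10 ms) instead of using the
 * PCC-derived value.
 */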

static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
        struct cppc_cpudata *cpu;
        unsigned int cpu_num = policy->cpu;
        int ret = 0;

        cpu = all_cpu_data[policy->cpu];

        cpu->cpu = cpu_num;
        ret = cppc_get_perf_caps(policy->cpu, &cpu->perf_caps);

        if (ret) {
                pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
                                cpu_num, ret);
                return ret;
        }

        /* Convert the lowest and nominal freq from MHz to KHz */
        cpu->perf_caps.lowest_freq *= 1000;
        cpu->perf_caps.nominal_freq *= 1000;

        /*
         * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
         * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
         */
        policy->min = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_nonlinear_perf);
        policy->max = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

        /*
         * Set cpuinfo.min_freq to Lowest to make the full range of performance
         * available if userspace wants to use any perf between lowest & lowest
         * nonlinear perf
         */
        policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.lowest_perf);
        policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu, cpu->perf_caps.highest_perf);

        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
        policy->shared_type = cpu->shared_type;

        if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                int i;

                cpumask_copy(policy->cpus, cpu->shared_cpu_map);

                for_each_cpu(i, policy->cpus) {
                        if (unlikely(i == policy->cpu))
                                continue;

                        memcpy(&all_cpu_data[i]->perf_caps, &cpu->perf_caps,
                               sizeof(cpu->perf_caps));
                }
        } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
                /* Support only SW_ANY for now. */
                pr_debug("Unsupported CPU co-ord type\n");
                return -EFAULT;
        }

        cpu->cur_policy = policy;

        /* Set policy->cur to max now. The governors will adjust later. */
        policy->cur = cppc_cpufreq_perf_to_khz(cpu,
                                        cpu->perf_caps.highest_perf);
        cpu->perf_ctrls.desired_perf = cpu->perf_caps.highest_perf;

        ret = cppc_set_perf(cpu_num, &cpu->perf_ctrls);
        if (ret)
                pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
                                cpu->perf_caps.highest_perf, cpu_num, ret);

        return ret;
}

static inline u64 get_delta(u64 t1, u64 t0)
{
        if (t1 > t0 || t0 > ~(u32)0)
                return t1 - t0;

        return (u32)t1 - (u32)t0;
}
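
/*
 * get_delta() tolerates a single wraparound of 32-bit feedback counters:
 * when t1 <= t0 and t0 still fits in 32 bits, the subtraction is done in
 * u32 so it wraps modulo 2^32. E.g. (hypothetical values) t0 = 0xfffffff0
 * and t1 = 0x10 give (u32)0x10 - (u32)0xfffffff0 = 0x20.
 */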

static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t0,
                                     struct cppc_perf_fb_ctrs fb_ctrs_t1)
{
        u64 delta_reference, delta_delivered;
        u64 reference_perf, delivered_perf;

        reference_perf = fb_ctrs_t0.reference_perf;

        delta_reference = get_delta(fb_ctrs_t1.reference,
                                    fb_ctrs_t0.reference);
        delta_delivered = get_delta(fb_ctrs_t1.delivered,
                                    fb_ctrs_t0.delivered);

        /* Check to avoid divide-by-zero */
        if (delta_reference || delta_delivered)
                delivered_perf = (reference_perf * delta_delivered) /
                                        delta_reference;
        else
                delivered_perf = cpu->perf_ctrls.desired_perf;

        return cppc_cpufreq_perf_to_khz(cpu, delivered_perf);
}
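
/*
 * Worked example (hypothetical counter values): with reference_perf = 100,
 * delta_reference = 1000000 and delta_delivered = 1500000, the CPU ran at
 * 100 * 1500000 / 1000000 = 150 delivered perf over the sampling window,
 * which is then converted to kHz for the cpufreq core.
 */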

static unsigned int cppc_cpufreq_get_rate(unsigned int cpunum)
{
        struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
        struct cppc_cpudata *cpu = all_cpu_data[cpunum];
        int ret;

        ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t0);
        if (ret)
                return ret;

        udelay(2); /* 2 usec delay between samples */

        ret = cppc_get_perf_ctrs(cpunum, &fb_ctrs_t1);
        if (ret)
                return ret;

        return cppc_get_rate_from_fbctrs(cpu, fb_ctrs_t0, fb_ctrs_t1);
}

static struct cpufreq_driver cppc_cpufreq_driver = {
        .flags = CPUFREQ_CONST_LOOPS,
        .verify = cppc_verify_policy,
        .target = cppc_cpufreq_set_target,
        .get = cppc_cpufreq_get_rate,
        .init = cppc_cpufreq_cpu_init,
        .stop_cpu = cppc_cpufreq_stop_cpu,
        .name = "cppc_cpufreq",
};

static int __init cppc_cpufreq_init(void)
{
        int i, ret = 0;
        struct cppc_cpudata *cpu;

        if (acpi_disabled)
                return -ENODEV;

        all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
                               GFP_KERNEL);
        if (!all_cpu_data)
                return -ENOMEM;

        for_each_possible_cpu(i) {
                all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
                if (!all_cpu_data[i])
                        goto out;

                cpu = all_cpu_data[i];
                if (!zalloc_cpumask_var(&cpu->shared_cpu_map, GFP_KERNEL))
                        goto out;
        }

        ret = acpi_get_psd_map(all_cpu_data);
        if (ret) {
                pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
                goto out;
        }

        ret = cpufreq_register_driver(&cppc_cpufreq_driver);
        if (ret)
                goto out;

        return ret;

out:
        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                if (!cpu)
                        break;
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
        return -ENODEV;
}

static void __exit cppc_cpufreq_exit(void)
{
        struct cppc_cpudata *cpu;
        int i;

        cpufreq_unregister_driver(&cppc_cpufreq_driver);

        for_each_possible_cpu(i) {
                cpu = all_cpu_data[i];
                free_cpumask_var(cpu->shared_cpu_map);
                kfree(cpu);
        }

        kfree(all_cpu_data);
}

module_exit(cppc_cpufreq_exit);
MODULE_AUTHOR("Ashwin Chaugule");
MODULE_DESCRIPTION("CPUFreq driver based on the ACPI CPPC v5.0+ spec");
MODULE_LICENSE("GPL");

late_initcall(cppc_cpufreq_init);

static const struct acpi_device_id cppc_acpi_ids[] = {
        {ACPI_PROCESSOR_DEVICE_HID, },
        {}
};

MODULE_DEVICE_TABLE(acpi, cppc_acpi_ids);