linux/drivers/acpi/processor_thermal.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <linux/uaccess.h>

#define PREFIX "ACPI: "

#ifdef CONFIG_CPU_FREQ

/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3
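/*
 * Each reduction step above CPUFREQ_THERMAL_MIN_STEP removes another 20%
 * of the CPU's maximum frequency (see cpufreq_set_cur_state()), so the
 * deepest step, 3, caps the CPU at 40% of cpuinfo.max_freq.
 */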

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);

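/*
 * reduction_pctg(cpu) resolves to the per-cpu slot of the first online CPU
 * in cpu's physical package, so every CPU in a package reads and writes the
 * same cooling-state value.
 */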
#define reduction_pctg(cpu) \
        per_cpu(cpufreq_thermal_reduction_pctg, phys_package_first_cpu(cpu))

/*
 * Emulate "per package data" using per cpu data (which should really be
 * provided elsewhere).
 *
 * Note that we can lose a CPU on CPU hot-unplug; in that case we forget its
 * state temporarily. Fortunately that's not a big issue here (I hope).
 */
static int phys_package_first_cpu(int cpu)
{
        int i;
        int id = topology_physical_package_id(cpu);

        for_each_online_cpu(i)
                if (topology_physical_package_id(i) == id)
                        return i;
        return 0;
}

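/*
 * cpufreq-based cooling is only usable when the ACPI processor cpufreq glue
 * has been initialized and a cpufreq policy exists for the CPU.
 */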
static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;

        if (!acpi_processor_cpufreq_init || cpufreq_get_policy(&policy, cpu))
                return 0;

        return 1;
}

static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return reduction_pctg(cpu);
}

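/*
 * Record the new cooling state for the package containing @cpu and refresh
 * the FREQ_QOS_MAX request of every online CPU in that package so their
 * maximum frequency is capped accordingly.
 */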
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        struct cpufreq_policy *policy;
        struct acpi_processor *pr;
        unsigned long max_freq;
        int i, ret;

        if (!cpu_has_cpufreq(cpu))
                return 0;

        reduction_pctg(cpu) = state;

        /*
         * Update all the CPUs in the same package because they all
         * contribute to the temperature and often share the same
         * frequency.
         */
        for_each_online_cpu(i) {
                if (topology_physical_package_id(i) !=
                    topology_physical_package_id(cpu))
                        continue;

                pr = per_cpu(processors, i);

                if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
                        continue;

                policy = cpufreq_cpu_get(i);
                if (!policy)
                        return -EINVAL;

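                /*
                 * Each cooling step removes 20% of the maximum frequency,
                 * e.g. state 2 caps the CPU at (100 - 2 * 20)% = 60% of
                 * cpuinfo.max_freq.
                 */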
                max_freq = (policy->cpuinfo.max_freq * (100 - reduction_pctg(i) * 20)) / 100;

                cpufreq_cpu_put(policy);

                ret = freq_qos_update_request(&pr->thermal_req, max_freq);
                if (ret < 0) {
                        pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
                                pr->id, ret);
                }
        }
        return 0;
}

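/*
 * Add an initially unlimited (INT_MAX) FREQ_QOS_MAX request for every CPU
 * covered by @policy; cpufreq_set_cur_state() tightens these requests when
 * passive cooling kicks in.
 */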
void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
{
        unsigned int cpu;

        for_each_cpu(cpu, policy->related_cpus) {
                struct acpi_processor *pr = per_cpu(processors, cpu);
                int ret;

                if (!pr)
                        continue;

                ret = freq_qos_add_request(&policy->constraints,
                                           &pr->thermal_req,
                                           FREQ_QOS_MAX, INT_MAX);
                if (ret < 0)
                        pr_err("Failed to add freq constraint for CPU%d (%d)\n",
                               cpu, ret);
        }
}

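/*
 * Remove the thermal frequency QoS requests added by
 * acpi_thermal_cpufreq_init().
 */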
void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
{
        unsigned int cpu;

        for_each_cpu(cpu, policy->related_cpus) {
                struct acpi_processor *pr = per_cpu(processors, cpu);

                if (pr)
                        freq_qos_remove_request(&pr->thermal_req);
        }
}
#else                           /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

#endif

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There are four states according to
         * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}

static int
processor_get_max_state(struct thermal_cooling_device *cdev,
                        unsigned long *state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *state = acpi_processor_max_state(pr);
        return 0;
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long *cur_state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        *cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                *cur_state += pr->throttling.state;
        return 0;
}

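/*
 * Cooling states 0..max_pstate are handled purely by cpufreq frequency
 * reduction; states above that additionally engage ACPI T-state throttling
 * via acpi_processor_set_throttling().
 */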
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
                        unsigned long state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int result = 0;
        int max_pstate;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0, false);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                state - max_pstate, false);
        }
        return result;
}

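/*
 * Callbacks exposed to the thermal core; the cooling device state space is
 * the cpufreq reduction steps followed by the ACPI throttling states.
 */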
const struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};