/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright    (C) 2001 Russell King
 *              (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *              (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *              (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *              (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

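/*
 * Pick the sysfs attribute group for the governor tunables: the per-policy
 * group when each policy has its own governor instance, the system-wide
 * group otherwise.
 */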
static struct attribute_group *get_sysfs_attr(struct dbs_data *dbs_data)
{
        if (have_governor_per_policy())
                return dbs_data->cdata->attr_group_gov_pol;
        else
                return dbs_data->cdata->attr_group_gov_sys;
}

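/*
 * dbs_check_cpu - evaluate the load over all CPUs that share @cpu's policy.
 *
 * For every CPU in the policy, derive the busy percentage from the idle and
 * wall time deltas since the previous sample (iowait may count as busy for
 * ondemand, nice time as idle when ignore_nice_load is set), then hand the
 * highest per-CPU load to the governor's gov_check_cpu() callback.
 */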
void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
{
        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
        struct cpufreq_policy *policy;
        unsigned int sampling_rate;
        unsigned int max_load = 0;
        unsigned int ignore_nice;
        unsigned int j;

        if (dbs_data->cdata->governor == GOV_ONDEMAND) {
                struct od_cpu_dbs_info_s *od_dbs_info =
                                dbs_data->cdata->get_cpu_dbs_info_s(cpu);

                /*
                 * Sometimes, the ondemand governor uses an additional
                 * multiplier to give long delays. So apply this multiplier to
                 * the 'sampling_rate', so as to keep the wake-up-from-idle
                 * detection logic a bit conservative.
                 */
                sampling_rate = od_tuners->sampling_rate;
                sampling_rate *= od_dbs_info->rate_mult;

                ignore_nice = od_tuners->ignore_nice_load;
        } else {
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        }

        policy = cdbs->cur_policy;

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_common_info *j_cdbs;
                u64 cur_wall_time, cur_idle_time;
                unsigned int idle_time, wall_time;
                unsigned int load;
                int io_busy = 0;

                j_cdbs = dbs_data->cdata->get_cpu_cdbs(j);

                /*
                 * For the purpose of ondemand, waiting for disk IO is
                 * an indication that you're performance critical, and
                 * not that the system is actually idle. So do not add
                 * the iowait time to the cpu idle time.
                 */
                if (dbs_data->cdata->governor == GOV_ONDEMAND)
                        io_busy = od_tuners->io_is_busy;
                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, io_busy);

                wall_time = (unsigned int)
                        (cur_wall_time - j_cdbs->prev_cpu_wall);
                j_cdbs->prev_cpu_wall = cur_wall_time;

                idle_time = (unsigned int)
                        (cur_idle_time - j_cdbs->prev_cpu_idle);
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice;
                        unsigned long cur_nice_jiffies;

                        cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
                                         cdbs->prev_cpu_nice;
                        /*
                         * Assumption: nice time between sampling periods will
                         * be less than 2^32 jiffies for 32 bit sys
                         */
                        cur_nice_jiffies = (unsigned long)
                                        cputime64_to_jiffies64(cur_nice);

                        cdbs->prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                        idle_time += jiffies_to_usecs(cur_nice_jiffies);
                }

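                /*
                 * No usable sample: either no wall time has elapsed, or the
                 * idle delta (possibly inflated by nice time) exceeds it.
                 */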
                if (unlikely(!wall_time || wall_time < idle_time))
                        continue;

                /*
                 * If the CPU had gone completely idle, and a task just woke up
                 * on this CPU now, it would be unfair to calculate 'load' the
                 * usual way for this elapsed time-window, because it will show
                 * near-zero load, irrespective of how CPU intensive that task
                 * actually is. This is undesirable for latency-sensitive bursty
                 * workloads.
                 *
                 * To avoid this, we reuse the 'load' from the previous
                 * time-window and give this task a chance to start with a
                 * reasonably high CPU frequency. (However, we shouldn't over-do
                 * this copy, lest we get stuck at a high load (high frequency)
                 * for too long, even when the current system load has actually
                 * dropped down. So we perform the copy only once, upon the
                 * first wake-up from idle.)
                 *
                 * Detecting this situation is easy: the governor's deferrable
                 * timer would not have fired during CPU-idle periods. Hence
                 * an unusually large 'wall_time' (as compared to the sampling
                 * rate) indicates this scenario.
                 *
                 * prev_load can be zero in two cases and we must recalculate it
                 * for both cases:
                 * - during long idle intervals
                 * - explicitly set to zero
                 */
                if (unlikely(wall_time > (2 * sampling_rate) &&
                             j_cdbs->prev_load)) {
                        load = j_cdbs->prev_load;

                        /*
                         * Perform a destructive copy, to ensure that we copy
                         * the previous load only once, upon the first wake-up
                         * from idle.
                         */
                        j_cdbs->prev_load = 0;
                } else {
                        load = 100 * (wall_time - idle_time) / wall_time;
                        j_cdbs->prev_load = load;
                }

                if (load > max_load)
                        max_load = load;
        }

        dbs_data->cdata->gov_check_cpu(cpu, max_load);
}
EXPORT_SYMBOL_GPL(dbs_check_cpu);

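/* (Re)arm the sampling work of a single CPU to run after @delay jiffies. */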
static inline void __gov_queue_work(int cpu, struct dbs_data *dbs_data,
                unsigned int delay)
{
        struct cpu_dbs_common_info *cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

        mod_delayed_work_on(cpu, system_wq, &cdbs->work, delay);
}

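/*
 * gov_queue_work - queue the sampling work for a policy.
 *
 * With @all_cpus set, the work of every CPU in the policy is queued;
 * otherwise only the local CPU is rearmed. Nothing is queued once the
 * governor has been disabled for this policy.
 */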
void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
                unsigned int delay, bool all_cpus)
{
        int i;

        mutex_lock(&cpufreq_governor_lock);
        if (!policy->governor_enabled)
                goto out_unlock;

        if (!all_cpus) {
                /*
                 * Use raw_smp_processor_id() to avoid preemptible warnings.
                 * We know that this is only called with all_cpus == false from
                 * works that have been queued with *_work_on() functions and
                 * those works are canceled during CPU_DOWN_PREPARE so they
                 * can't possibly run on any other CPU.
                 */
                __gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
        } else {
                for_each_cpu(i, policy->cpus)
                        __gov_queue_work(i, dbs_data, delay);
        }

out_unlock:
        mutex_unlock(&cpufreq_governor_lock);
}
EXPORT_SYMBOL_GPL(gov_queue_work);

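/* Synchronously cancel the pending sampling work of every CPU in the policy. */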
static inline void gov_cancel_work(struct dbs_data *dbs_data,
                struct cpufreq_policy *policy)
{
        struct cpu_dbs_common_info *cdbs;
        int i;

        for_each_cpu(i, policy->cpus) {
                cdbs = dbs_data->cdata->get_cpu_cdbs(i);
                cancel_delayed_work_sync(&cdbs->work);
        }
}

/* Return whether the cpu load needs to be evaluated again */
bool need_load_eval(struct cpu_dbs_common_info *cdbs,
                unsigned int sampling_rate)
{
        if (policy_is_shared(cdbs->cur_policy)) {
                ktime_t time_now = ktime_get();
                s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

                /* Do nothing if we have sampled recently */
                if (delta_us < (s64)(sampling_rate / 2))
                        return false;
                else
                        cdbs->time_stamp = time_now;
        }

        return true;
}
EXPORT_SYMBOL_GPL(need_load_eval);

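/* Store the sampling rate in the tuners of whichever governor owns @dbs_data. */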
static void set_sampling_rate(struct dbs_data *dbs_data,
                unsigned int sampling_rate)
{
        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
                cs_tuners->sampling_rate = sampling_rate;
        } else {
                struct od_dbs_tuners *od_tuners = dbs_data->tuners;
                od_tuners->sampling_rate = sampling_rate;
        }
}

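/*
 * cpufreq_governor_dbs - common backend for the ondemand and conservative
 * governors.
 *
 * Handles the POLICY_INIT, POLICY_EXIT, START, STOP and LIMITS events coming
 * from the cpufreq core, delegating governor-specific behaviour to the
 * callbacks and tuners referenced by @cdata.
 */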
int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                struct common_dbs_data *cdata, unsigned int event)
{
        struct dbs_data *dbs_data;
        struct od_cpu_dbs_info_s *od_dbs_info = NULL;
        struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
        struct od_ops *od_ops = NULL;
        struct od_dbs_tuners *od_tuners = NULL;
        struct cs_dbs_tuners *cs_tuners = NULL;
        struct cpu_dbs_common_info *cpu_cdbs;
        unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
        int io_busy = 0;
        int rc;

        if (have_governor_per_policy())
                dbs_data = policy->governor_data;
        else
                dbs_data = cdata->gdbs_data;

        WARN_ON(!dbs_data && (event != CPUFREQ_GOV_POLICY_INIT));

        switch (event) {
        case CPUFREQ_GOV_POLICY_INIT:
                if (have_governor_per_policy()) {
                        WARN_ON(dbs_data);
                } else if (dbs_data) {
                        dbs_data->usage_count++;
                        policy->governor_data = dbs_data;
                        return 0;
                }

                dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
                if (!dbs_data) {
                        pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
                        return -ENOMEM;
                }

                dbs_data->cdata = cdata;
                dbs_data->usage_count = 1;
                rc = cdata->init(dbs_data);
                if (rc) {
                        pr_err("%s: POLICY_INIT: init() failed\n", __func__);
                        kfree(dbs_data);
                        return rc;
                }

                if (!have_governor_per_policy())
                        WARN_ON(cpufreq_get_global_kobject());

                rc = sysfs_create_group(get_governor_parent_kobj(policy),
                                get_sysfs_attr(dbs_data));
                if (rc) {
                        cdata->exit(dbs_data);
                        kfree(dbs_data);
                        return rc;
                }

                policy->governor_data = dbs_data;

                /* policy latency is in ns. Convert it to us first */
                latency = policy->cpuinfo.transition_latency / 1000;
                if (latency == 0)
                        latency = 1;

                /* Bring kernel and HW constraints together */
                dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
                                MIN_LATENCY_MULTIPLIER * latency);
                set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                        latency * LATENCY_MULTIPLIER));

                if ((cdata->governor == GOV_CONSERVATIVE) &&
                                (!policy->governor->initialized)) {
                        struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

                        cpufreq_register_notifier(cs_ops->notifier_block,
                                        CPUFREQ_TRANSITION_NOTIFIER);
                }

                if (!have_governor_per_policy())
                        cdata->gdbs_data = dbs_data;

                return 0;
        case CPUFREQ_GOV_POLICY_EXIT:
                if (!--dbs_data->usage_count) {
                        sysfs_remove_group(get_governor_parent_kobj(policy),
                                        get_sysfs_attr(dbs_data));

                        if (!have_governor_per_policy())
                                cpufreq_put_global_kobject();

                        if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
                                (policy->governor->initialized == 1)) {
                                struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;

                                cpufreq_unregister_notifier(cs_ops->notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                        }

                        cdata->exit(dbs_data);
                        kfree(dbs_data);
                        cdata->gdbs_data = NULL;
                }

                policy->governor_data = NULL;
                return 0;
        }

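        /*
         * POLICY_INIT and POLICY_EXIT were handled above; the remaining
         * events need the per-CPU data and governor-specific tuners below.
         */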
        cpu_cdbs = dbs_data->cdata->get_cpu_cdbs(cpu);

        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                cs_tuners = dbs_data->tuners;
                cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = cs_tuners->sampling_rate;
                ignore_nice = cs_tuners->ignore_nice_load;
        } else {
                od_tuners = dbs_data->tuners;
                od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
                sampling_rate = od_tuners->sampling_rate;
                ignore_nice = od_tuners->ignore_nice_load;
                od_ops = dbs_data->cdata->gov_ops;
                io_busy = od_tuners->io_is_busy;
        }

        switch (event) {
        case CPUFREQ_GOV_START:
                if (!policy->cur)
                        return -EINVAL;

                mutex_lock(&dbs_data->mutex);

                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_common_info *j_cdbs =
                                dbs_data->cdata->get_cpu_cdbs(j);
                        unsigned int prev_load;

                        j_cdbs->cpu = j;
                        j_cdbs->cur_policy = policy;
                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
                                               &j_cdbs->prev_cpu_wall, io_busy);

                        prev_load = (unsigned int)
                                (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
                        j_cdbs->prev_load = 100 * prev_load /
                                        (unsigned int) j_cdbs->prev_cpu_wall;

                        if (ignore_nice)
                                j_cdbs->prev_cpu_nice =
                                        kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        mutex_init(&j_cdbs->timer_mutex);
                        INIT_DEFERRABLE_WORK(&j_cdbs->work,
                                             dbs_data->cdata->gov_dbs_timer);
                }

                if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
                        cs_dbs_info->down_skip = 0;
                        cs_dbs_info->enable = 1;
                        cs_dbs_info->requested_freq = policy->cur;
                } else {
                        od_dbs_info->rate_mult = 1;
                        od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
                        od_ops->powersave_bias_init_cpu(cpu);
                }

                mutex_unlock(&dbs_data->mutex);

                /* Initiate timer time stamp */
                cpu_cdbs->time_stamp = ktime_get();

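                /* Arm the sampling work on every CPU of the policy. */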
                gov_queue_work(dbs_data, policy,
                                delay_for_sampling_rate(sampling_rate), true);
                break;

        case CPUFREQ_GOV_STOP:
                if (dbs_data->cdata->governor == GOV_CONSERVATIVE)
                        cs_dbs_info->enable = 0;

                gov_cancel_work(dbs_data, policy);

                mutex_lock(&dbs_data->mutex);
                mutex_destroy(&cpu_cdbs->timer_mutex);
                cpu_cdbs->cur_policy = NULL;

                mutex_unlock(&dbs_data->mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
                mutex_lock(&dbs_data->mutex);
                if (!cpu_cdbs->cur_policy) {
                        mutex_unlock(&dbs_data->mutex);
                        break;
                }
                mutex_lock(&cpu_cdbs->timer_mutex);
                if (policy->max < cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
                                        policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > cpu_cdbs->cur_policy->cur)
                        __cpufreq_driver_target(cpu_cdbs->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
                dbs_check_cpu(dbs_data, cpu);
                mutex_unlock(&cpu_cdbs->timer_mutex);
                mutex_unlock(&dbs_data->mutex);
                break;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_governor_dbs);