linux/drivers/cpufreq/cpufreq_governor.c
/*
 * drivers/cpufreq/cpufreq_governor.c
 *
 * CPUFREQ governors common code
 *
 * Copyright    (C) 2001 Russell King
 *              (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *              (C) 2003 Jun Nakajima <jun.nakajima@intel.com>
 *              (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *              (c) 2012 Viresh Kumar <viresh.kumar@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/slab.h>

#include "cpufreq_governor.h"

#define CPUFREQ_DBS_MIN_SAMPLING_INTERVAL       (2 * TICK_NSEC / NSEC_PER_USEC)
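
/*
 * TICK_NSEC is the length of one scheduler tick in nanoseconds, so the
 * minimum sampling interval above is two ticks expressed in microseconds:
 * roughly 8000 us with CONFIG_HZ=250 and 2000 us with CONFIG_HZ=1000, for
 * example.
 */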

static DEFINE_PER_CPU(struct cpu_dbs_info, cpu_dbs);

static DEFINE_MUTEX(gov_dbs_data_mutex);

/* Common sysfs tunables */
/**
 * store_sampling_rate - update sampling rate, making it effective immediately if needed.
 * @attr_set: Governor attribute set the sampling rate belongs to.
 * @buf: Buffer containing the new sampling rate value (in microseconds).
 * @count: Length of @buf.
 *
 * If the new rate is smaller than the old one, simply updating
 * dbs_data->sampling_rate is not enough.  For example, if the original
 * sampling rate was 1 second and the user requests 10 ms because an immediate
 * reaction from the ondemand governor is needed, the governor might not pick
 * up the new rate until up to 1 second later.  Thus, when the sampling rate is
 * being reduced, the new value must also be made effective immediately, which
 * is done below by resetting the sample delay of every policy attached to
 * these tunables.
 *
 * This must be called with dbs_data->mutex held, otherwise traversing
 * the policy list below isn't safe.
 */
ssize_t store_sampling_rate(struct gov_attr_set *attr_set, const char *buf,
                            size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        struct policy_dbs_info *policy_dbs;
        unsigned int sampling_interval;
        int ret;

        ret = sscanf(buf, "%u", &sampling_interval);
        if (ret != 1 || sampling_interval < CPUFREQ_DBS_MIN_SAMPLING_INTERVAL)
                return -EINVAL;

        dbs_data->sampling_rate = sampling_interval;

        /*
         * We are operating under dbs_data->mutex and so the list and its
         * entries can't be freed concurrently.
         */
        list_for_each_entry(policy_dbs, &attr_set->policy_list, list) {
                mutex_lock(&policy_dbs->update_mutex);
                /*
                 * On 32-bit architectures this may race with the
                 * sample_delay_ns read in dbs_update_util_handler(), but that
                 * really doesn't matter.  If the read returns a value that's
                 * too big, the sample will be skipped, but the next invocation
                 * of dbs_update_util_handler() (when the update has been
                 * completed) will take a sample.
                 *
                 * If this runs in parallel with dbs_work_handler(), we may end
                 * up overwriting the sample_delay_ns value that it has just
                 * written, but it will be corrected next time a sample is
                 * taken, so it shouldn't be significant.
                 */
                gov_update_sample_delay(policy_dbs, 0);
                mutex_unlock(&policy_dbs->update_mutex);
        }

        return count;
}
EXPORT_SYMBOL_GPL(store_sampling_rate);
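
/*
 * Usage sketch (not part of this file): a dbs governor typically exposes this
 * handler through its sysfs attribute table, assuming the helpers from
 * cpufreq_governor.h follow the pattern used by ondemand and conservative:
 *
 *	gov_show_one_common(sampling_rate);
 *	gov_attr_rw(sampling_rate);
 *
 * gov_attr_rw() then builds a struct governor_attr whose ->store() resolves to
 * store_sampling_rate(), so a write to the governor's "sampling_rate" sysfs
 * file ends up here.
 */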

/**
 * gov_update_cpu_data - Update CPU load data.
 * @dbs_data: Top-level governor data pointer.
 *
 * Update CPU load data for all CPUs in the domain governed by @dbs_data
 * (that may be a single policy or multiple policies if the governor tunables
 * are system-wide).
 *
 * Call under the @dbs_data mutex.
 */
void gov_update_cpu_data(struct dbs_data *dbs_data)
{
        struct policy_dbs_info *policy_dbs;

        list_for_each_entry(policy_dbs, &dbs_data->attr_set.policy_list, list) {
                unsigned int j;

                for_each_cpu(j, policy_dbs->policy->cpus) {
                        struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                        j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time,
                                                                  dbs_data->io_is_busy);
                        if (dbs_data->ignore_nice_load)
                                j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
                }
        }
}
EXPORT_SYMBOL_GPL(gov_update_cpu_data);
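
/*
 * Illustrative sketch, modeled on the ondemand governor's io_is_busy tunable
 * (the name store_io_is_busy_example() is hypothetical): after changing a
 * tunable that feeds into the idle-time bookkeeping, gov_update_cpu_data() is
 * called so that the per-CPU baselines are refreshed and the next sample is
 * not computed against stale data.
 */
static ssize_t __maybe_unused store_io_is_busy_example(struct gov_attr_set *attr_set,
                                                       const char *buf, size_t count)
{
        struct dbs_data *dbs_data = to_dbs_data(attr_set);
        unsigned int input;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        dbs_data->io_is_busy = !!input;

        /* Re-evaluate prev_cpu_idle with the new io_is_busy setting. */
        gov_update_cpu_data(dbs_data);

        return count;
}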

unsigned int dbs_update(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int ignore_nice = dbs_data->ignore_nice_load;
        unsigned int max_load = 0, idle_periods = UINT_MAX;
        unsigned int sampling_rate, io_busy, j;

        /*
         * Sometimes governors may use an additional multiplier to increase
         * sample delays temporarily.  Apply that multiplier to sampling_rate
         * so as to keep the wake-up-from-idle detection logic a bit
         * conservative.
         */
        sampling_rate = dbs_data->sampling_rate * policy_dbs->rate_mult;
        /*
         * For the purpose of ondemand, waiting for disk IO is an indication
         * that the task is performance-critical, not that the system is
         * actually idle, so do not add the iowait time to the CPU idle time
         * in that case.
         */
        io_busy = dbs_data->io_is_busy;

        /* Get Absolute Load */
        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);
                u64 update_time, cur_idle_time;
                unsigned int idle_time, time_elapsed;
                unsigned int load;

                cur_idle_time = get_cpu_idle_time(j, &update_time, io_busy);

                time_elapsed = update_time - j_cdbs->prev_update_time;
                j_cdbs->prev_update_time = update_time;

                idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
                j_cdbs->prev_cpu_idle = cur_idle_time;

                if (ignore_nice) {
                        u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

                        idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
                        j_cdbs->prev_cpu_nice = cur_nice;
                }

                if (unlikely(!time_elapsed)) {
                        /*
                         * That can only happen when this function is called
                         * twice in a row with a very short interval between the
                         * calls, so the previous load value can be used then.
                         */
                        load = j_cdbs->prev_load;
                } else if (unlikely(time_elapsed > 2 * sampling_rate &&
                                    j_cdbs->prev_load)) {
                        /*
                         * If the CPU had gone completely idle and a task has
                         * just woken up on this CPU now, it would be unfair to
                         * calculate 'load' the usual way for this elapsed
                         * time-window, because it would show near-zero load,
                         * irrespective of how CPU intensive that task actually
                         * was. This is undesirable for latency-sensitive bursty
                         * workloads.
                         *
                         * To avoid this, reuse the 'load' from the previous
                         * time-window and give this task a chance to start with
                         * a reasonably high CPU frequency. However, that
                         * shouldn't be over-done, lest we get stuck at a high
                         * load (high frequency) for too long, even when the
                         * current system load has actually dropped down, so
                         * clear prev_load to guarantee that the load will be
                         * computed again next time.
                         *
                         * Detecting this situation is easy: the governor's
                         * utilization update handler would not have run during
                         * CPU-idle periods.  Hence, an unusually large
                         * 'time_elapsed' (as compared to the sampling rate)
                         * indicates this scenario.
                         */
                        load = j_cdbs->prev_load;
                        j_cdbs->prev_load = 0;
                } else {
                        if (time_elapsed >= idle_time) {
                                load = 100 * (time_elapsed - idle_time) / time_elapsed;
                        } else {
                                /*
                                 * That can happen if idle_time is returned by
                                 * get_cpu_idle_time_jiffy().  In that case
                                 * idle_time is roughly equal to the difference
                                 * between time_elapsed and "busy time" obtained
                                 * from CPU statistics.  Then, the "busy time"
                                 * can end up being greater than time_elapsed
                                 * (for example, if jiffies_64 and the CPU
                                 * statistics are updated by different CPUs),
                                 * so idle_time may in fact be negative.  That
                                 * means, though, that the CPU was busy all
                                 * the time (on the rough average) during the
                                 * last sampling interval and 100 can be
                                 * returned as the load.
                                 */
                                load = (int)idle_time < 0 ? 100 : 0;
                        }
                        j_cdbs->prev_load = load;
                }

                if (time_elapsed > 2 * sampling_rate) {
                        unsigned int periods = time_elapsed / sampling_rate;

                        if (periods < idle_periods)
                                idle_periods = periods;
                }

                if (load > max_load)
                        max_load = load;
        }

        policy_dbs->idle_periods = idle_periods;

        return max_load;
}
EXPORT_SYMBOL_GPL(dbs_update);
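
/*
 * For illustration only (not part of the original file; the helper name is
 * hypothetical): the core of the load computation in dbs_update() is the
 * fraction of wall-clock time that was not idle during the last sampling
 * window, with the same saturation behaviour for the "negative idle time"
 * corner case discussed above.
 */
static inline unsigned int dbs_load_example(unsigned int time_elapsed,
                                            unsigned int idle_time)
{
        if (!time_elapsed)
                return 0;       /* dbs_update() reuses prev_load instead */

        /* E.g. time_elapsed = 10000 us, idle_time = 2500 us -> load = 75. */
        if (time_elapsed >= idle_time)
                return 100 * (time_elapsed - idle_time) / time_elapsed;

        /* idle_time wrapped below zero: the CPU was busy for the whole window. */
        return (int)idle_time < 0 ? 100 : 0;
}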

static void dbs_work_handler(struct work_struct *work)
{
        struct policy_dbs_info *policy_dbs;
        struct cpufreq_policy *policy;
        struct dbs_governor *gov;

        policy_dbs = container_of(work, struct policy_dbs_info, work);
        policy = policy_dbs->policy;
        gov = dbs_governor_of(policy);

        /*
         * Make sure cpufreq_governor_limits() isn't evaluating load or the
         * ondemand governor isn't updating the sampling rate in parallel.
         */
        mutex_lock(&policy_dbs->update_mutex);
        gov_update_sample_delay(policy_dbs, gov->gov_dbs_update(policy));
        mutex_unlock(&policy_dbs->update_mutex);

        /* Allow the utilization update handler to queue up more work. */
        atomic_set(&policy_dbs->work_count, 0);
        /*
         * If the update below is reordered with respect to the sample delay
         * modification, the utilization update handler may end up using a stale
         * sample delay value.
         */
        smp_wmb();
        policy_dbs->work_in_progress = false;
}

static void dbs_irq_work(struct irq_work *irq_work)
{
        struct policy_dbs_info *policy_dbs;

        policy_dbs = container_of(irq_work, struct policy_dbs_info, irq_work);
        schedule_work_on(smp_processor_id(), &policy_dbs->work);
}

static void dbs_update_util_handler(struct update_util_data *data, u64 time,
                                    unsigned int flags)
{
        struct cpu_dbs_info *cdbs = container_of(data, struct cpu_dbs_info, update_util);
        struct policy_dbs_info *policy_dbs = cdbs->policy_dbs;
        u64 delta_ns, lst;

        if (!cpufreq_can_do_remote_dvfs(policy_dbs->policy))
                return;

        /*
         * The work may not be allowed to be queued up right now.
         * Possible reasons:
         * - Work has already been queued up or is in progress.
         * - It is too early (too little time from the previous sample).
         */
        if (policy_dbs->work_in_progress)
                return;

        /*
         * If the reads below are reordered before the check above, the value
         * of sample_delay_ns used in the computation may be stale.
         */
        smp_rmb();
        lst = READ_ONCE(policy_dbs->last_sample_time);
        delta_ns = time - lst;
        if ((s64)delta_ns < policy_dbs->sample_delay_ns)
                return;

        /*
         * If the policy is not shared, the irq_work may be queued up right away
         * at this point.  Otherwise, we need to ensure that only one of the
         * CPUs sharing the policy will do that.
         */
        if (policy_dbs->is_shared) {
                if (!atomic_add_unless(&policy_dbs->work_count, 1, 1))
                        return;

                /*
                 * If another CPU updated last_sample_time in the meantime, we
                 * shouldn't be here, so clear the work counter and bail out.
                 */
                if (unlikely(lst != READ_ONCE(policy_dbs->last_sample_time))) {
                        atomic_set(&policy_dbs->work_count, 0);
                        return;
                }
        }

        policy_dbs->last_sample_time = time;
        policy_dbs->work_in_progress = true;
        irq_work_queue(&policy_dbs->irq_work);
}

static void gov_set_update_util(struct policy_dbs_info *policy_dbs,
                                unsigned int delay_us)
{
        struct cpufreq_policy *policy = policy_dbs->policy;
        int cpu;

        gov_update_sample_delay(policy_dbs, delay_us);
        policy_dbs->last_sample_time = 0;

        for_each_cpu(cpu, policy->cpus) {
                struct cpu_dbs_info *cdbs = &per_cpu(cpu_dbs, cpu);

                cpufreq_add_update_util_hook(cpu, &cdbs->update_util,
                                             dbs_update_util_handler);
        }
}
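
/*
 * gov_update_sample_delay() is defined in cpufreq_governor.h; assuming it
 * keeps its usual form, it converts the microsecond delay passed here into
 * nanoseconds for sample_delay_ns.  With a sampling_rate of 10000 us, for
 * example, dbs_update_util_handler() then queues work at most once every
 * 10 ms per policy.
 */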

static inline void gov_clear_update_util(struct cpufreq_policy *policy)
{
        int i;

        for_each_cpu(i, policy->cpus)
                cpufreq_remove_update_util_hook(i);

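        /* Wait for any in-flight invocations of the update_util callback. */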
        synchronize_sched();
}

static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
                                                     struct dbs_governor *gov)
{
        struct policy_dbs_info *policy_dbs;
        int j;

        /* Allocate memory for per-policy governor data. */
        policy_dbs = gov->alloc();
        if (!policy_dbs)
                return NULL;

        policy_dbs->policy = policy;
        mutex_init(&policy_dbs->update_mutex);
        atomic_set(&policy_dbs->work_count, 0);
        init_irq_work(&policy_dbs->irq_work, dbs_irq_work);
        INIT_WORK(&policy_dbs->work, dbs_work_handler);

        /* Set policy_dbs for all CPUs, online+offline */
        for_each_cpu(j, policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = policy_dbs;
        }
        return policy_dbs;
}

static void free_policy_dbs_info(struct policy_dbs_info *policy_dbs,
                                 struct dbs_governor *gov)
{
        int j;

        mutex_destroy(&policy_dbs->update_mutex);

        for_each_cpu(j, policy_dbs->policy->related_cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->policy_dbs = NULL;
                j_cdbs->update_util.func = NULL;
        }
        gov->free(policy_dbs);
}

int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct dbs_data *dbs_data;
        struct policy_dbs_info *policy_dbs;
        int ret = 0;

        /* State should be equivalent to EXIT */
        if (policy->governor_data)
                return -EBUSY;

        policy_dbs = alloc_policy_dbs_info(policy, gov);
        if (!policy_dbs)
                return -ENOMEM;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        dbs_data = gov->gdbs_data;
        if (dbs_data) {
                if (WARN_ON(have_governor_per_policy())) {
                        ret = -EINVAL;
                        goto free_policy_dbs_info;
                }
                policy_dbs->dbs_data = dbs_data;
                policy->governor_data = policy_dbs;

                gov_attr_set_get(&dbs_data->attr_set, &policy_dbs->list);
                goto out;
        }

        dbs_data = kzalloc(sizeof(*dbs_data), GFP_KERNEL);
        if (!dbs_data) {
                ret = -ENOMEM;
                goto free_policy_dbs_info;
        }

        gov_attr_set_init(&dbs_data->attr_set, &policy_dbs->list);

        ret = gov->init(dbs_data);
        if (ret)
                goto free_policy_dbs_info;

        /*
         * The sampling interval should not be less than the transition latency
         * of the CPU and it also cannot be too small for dbs_update() to work
         * correctly.
         */
        dbs_data->sampling_rate = max_t(unsigned int,
                                        CPUFREQ_DBS_MIN_SAMPLING_INTERVAL,
                                        cpufreq_policy_transition_delay_us(policy));

        if (!have_governor_per_policy())
                gov->gdbs_data = dbs_data;

        policy_dbs->dbs_data = dbs_data;
        policy->governor_data = policy_dbs;

        gov->kobj_type.sysfs_ops = &governor_sysfs_ops;
        ret = kobject_init_and_add(&dbs_data->attr_set.kobj, &gov->kobj_type,
                                   get_governor_parent_kobj(policy),
                                   "%s", gov->gov.name);
        if (!ret)
                goto out;

        /* Failure, so roll back. */
        pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);

        policy->governor_data = NULL;

        if (!have_governor_per_policy())
                gov->gdbs_data = NULL;
        gov->exit(dbs_data);
        kfree(dbs_data);

free_policy_dbs_info:
        free_policy_dbs_info(policy_dbs, gov);

out:
        mutex_unlock(&gov_dbs_data_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_init);
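
/*
 * How a governor plugs into this framework, as a sketch (the foo_* names are
 * hypothetical and CPUFREQ_DBS_GOVERNOR_INITIALIZER is assumed to look as it
 * does for ondemand/conservative in cpufreq_governor.h): the governor fills in
 * a struct dbs_governor whose common cpufreq callbacks are the
 * cpufreq_dbs_governor_* functions exported from this file, and whose
 * governor-specific hooks (->gov_dbs_update(), ->alloc(), ->free(), ->init(),
 * ->exit(), ->start()) are the ones invoked above:
 *
 *	static struct dbs_governor foo_dbs_gov = {
 *		.gov = CPUFREQ_DBS_GOVERNOR_INITIALIZER("foo"),
 *		.kobj_type = { .default_attrs = foo_attributes },
 *		.gov_dbs_update = foo_dbs_update,
 *		.alloc = foo_alloc,
 *		.free = foo_free,
 *		.init = foo_init,
 *		.exit = foo_exit,
 *		.start = foo_start,
 *	};
 *
 *	cpufreq_register_governor(&foo_dbs_gov.gov);
 */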

void cpufreq_dbs_governor_exit(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int count;

        /* Protect gov->gdbs_data against concurrent updates. */
        mutex_lock(&gov_dbs_data_mutex);

        count = gov_attr_set_put(&dbs_data->attr_set, &policy_dbs->list);

        policy->governor_data = NULL;

        if (!count) {
                if (!have_governor_per_policy())
                        gov->gdbs_data = NULL;

                gov->exit(dbs_data);
                kfree(dbs_data);
        }

        free_policy_dbs_info(policy_dbs, gov);

        mutex_unlock(&gov_dbs_data_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_exit);

int cpufreq_dbs_governor_start(struct cpufreq_policy *policy)
{
        struct dbs_governor *gov = dbs_governor_of(policy);
        struct policy_dbs_info *policy_dbs = policy->governor_data;
        struct dbs_data *dbs_data = policy_dbs->dbs_data;
        unsigned int sampling_rate, ignore_nice, j;
        unsigned int io_busy;

        if (!policy->cur)
                return -EINVAL;

        policy_dbs->is_shared = policy_is_shared(policy);
        policy_dbs->rate_mult = 1;

        sampling_rate = dbs_data->sampling_rate;
        ignore_nice = dbs_data->ignore_nice_load;
        io_busy = dbs_data->io_is_busy;

        for_each_cpu(j, policy->cpus) {
                struct cpu_dbs_info *j_cdbs = &per_cpu(cpu_dbs, j);

                j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, &j_cdbs->prev_update_time, io_busy);
                /*
                 * Make the first invocation of dbs_update() compute the load.
                 */
                j_cdbs->prev_load = 0;

                if (ignore_nice)
                        j_cdbs->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }

        gov->start(policy);

        gov_set_update_util(policy_dbs, sampling_rate);
        return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_start);

void cpufreq_dbs_governor_stop(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        gov_clear_update_util(policy_dbs->policy);
        irq_work_sync(&policy_dbs->irq_work);
        cancel_work_sync(&policy_dbs->work);
        atomic_set(&policy_dbs->work_count, 0);
        policy_dbs->work_in_progress = false;
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);

void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
{
        struct policy_dbs_info *policy_dbs = policy->governor_data;

        mutex_lock(&policy_dbs->update_mutex);
        cpufreq_policy_apply_limits(policy);
        gov_update_sample_delay(policy_dbs, 0);

        mutex_unlock(&policy_dbs->update_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);