linux/drivers/cpuidle/governors/menu.c
/*
 * menu.c - the menu idle governor
 *
 * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
 * Copyright (C) 2009 Intel Corporation
 * Author:
 *        Arjan van de Ven <arjan@linux.intel.com>
 *
 * This code is licenced under the GPL version 2 as described
 * in the COPYING file that accompanies the Linux Kernel.
 */

#include <linux/kernel.h>
#include <linux/cpuidle.h>
#include <linux/pm_qos.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/module.h>

#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
#define DECAY 8
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400

/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
#define MAX_DEVIATION 60

static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
static DEFINE_PER_CPU(int, hrtimer_status);
/* menu hrtimer mode */
enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};

/*
 * Concepts and ideas behind the menu governor
 *
 * For the menu governor, there are 3 decision factors for picking a C
 * state:
 * 1) Energy break even point
 * 2) Performance impact
 * 3) Latency tolerance (from pmqos infrastructure)
 * These three factors are treated independently.
 *
 * Energy break even point
 * -----------------------
 * C state entry and exit have an energy cost, and a certain amount of time in
 * the C state is required to actually break even on this cost. CPUIDLE
 * provides us this duration in the "target_residency" field. So all that we
 * need is a good prediction of how long we'll be idle. Like the traditional
 * menu governor, we start with the actual known "next timer event" time.
 *
 * Since there are other sources of wakeups (interrupts for example) than
 * the next timer event, this estimation is rather optimistic. To get a
 * more realistic estimate, a correction factor is applied to the estimate,
 * that is based on historic behavior. For example, if in the past the actual
 * duration always was 50% of the next timer tick, the correction factor will
 * be 0.5.
 *
 * menu uses a running average for this correction factor, however it uses a
 * set of factors, not just a single factor. This stems from the realization
 * that the ratio is dependent on the order of magnitude of the expected
 * duration; if we expect 500 milliseconds of idle time the likelihood of
 * getting an interrupt very early is much higher than if we expect 50
 * microseconds of idle time. A second independent factor that has a big
 * impact on the actual ratio is whether there is (disk) IO outstanding or
 * not. (as a special twist, we consider every sleep longer than 50
 * milliseconds as perfect; there are no power gains for sleeping longer
 * than this)
 *
 * For these two reasons we keep an array of 12 independent factors, that gets
 * indexed based on the magnitude of the expected duration as well as the
 * "is IO outstanding" property.
 *
 * Repeatable-interval-detector
 * ----------------------------
 * There are some cases where "next timer" is a completely unusable predictor:
 * Those cases where the interval is fixed, for example due to hardware
 * interrupt mitigation, but also due to fixed transfer rate devices such as
 * mice.
 * For this, we use a different predictor: We track the duration of the last 8
 * intervals and if the standard deviation of these 8 intervals is below a
 * threshold value, we use the average of these intervals as prediction.
 *
 * Limiting Performance Impact
 * ---------------------------
 * C states, especially those with large exit latencies, can have a real
 * noticeable impact on workloads, which is not acceptable for most sysadmins,
 * and in addition, less performance has a power price of its own.
 *
 * As a general rule of thumb, menu assumes that the following heuristic
 * holds:
 *     The busier the system, the less impact of C states is acceptable
 *
 * This rule-of-thumb is implemented using a performance-multiplier:
 * If the exit latency times the performance multiplier is longer than
 * the predicted duration, the C state is not considered a candidate
 * for selection due to a too high performance impact. So the higher
 * this multiplier is, the longer we need to be idle to pick a deep C
 * state, and thus the less likely a busy CPU will hit such a deep
 * C state.
 *
 * Two factors are used in determining this multiplier:
 * 2 points are added for each tenth of a point of "per cpu load average"
 * we have, and 10 points are added for each process that is waiting for
 * IO on this CPU.
 * (these values are experimentally determined)
 *
 * The load average factor gives a longer term (few seconds) input to the
 * decision, while the iowait value gives a cpu local instantaneous input.
 * The iowait factor may look low, but realize that this is also already
 * represented in the system load average.
 *
 */
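
/*
 * Worked example (illustrative, not part of the original text): assume the
 * next timer event is 1000 us away and the stored correction factor for
 * the current bucket is 0.5, kept internally as the fixed-point value
 * 0.5 * RESOLUTION * DECAY = 4096.  The prediction then becomes
 *
 *     predicted_us = 1000 * 4096 / (RESOLUTION * DECAY) = 500 us
 *
 * so only C-states whose target_residency is at most 500 us remain
 * candidates on energy-break-even grounds.
 */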

/*
 * The C-state residency is so long that it is worthwhile to exit
 * from the shallow C-state and re-enter into a deeper C-state.
 */
static unsigned int perfect_cstate_ms __read_mostly = 30;
module_param(perfect_cstate_ms, uint, 0000);

struct menu_device {
        int             last_state_idx;
        int             needs_update;

        unsigned int    expected_us;
        u64             predicted_us;
        unsigned int    exit_us;
        unsigned int    bucket;
        u64             correction_factor[BUCKETS];
        u32             intervals[INTERVALS];
        int             interval_ptr;
};

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int get_loadavg(void)
{
        unsigned long this = this_cpu_load();

        return LOAD_INT(this) * 10 + LOAD_FRAC(this) / 10;
}
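
/*
 * Worked example (illustrative): the load average is kept in FSHIFT
 * fixed-point, so for a per-cpu load of 1.50 LOAD_INT() yields 1 and
 * LOAD_FRAC() yields 50.  get_loadavg() then returns 1 * 10 + 50 / 10 = 15,
 * i.e. the load expressed in tenths.
 */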

static inline int which_bucket(unsigned int duration)
{
        int bucket = 0;

        /*
         * We keep two groups of stats; one with no
         * IO pending, one with IO pending.
         * This allows us to calculate
         * E(duration)|iowait
         */
        if (nr_iowait_cpu(smp_processor_id()))
                bucket = BUCKETS/2;

        if (duration < 10)
                return bucket;
        if (duration < 100)
                return bucket + 1;
        if (duration < 1000)
                return bucket + 2;
        if (duration < 10000)
                return bucket + 3;
        if (duration < 100000)
                return bucket + 4;
        return bucket + 5;
}
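
/*
 * Worked example (illustrative): an expected idle time of 750 us with no
 * IO outstanding falls into bucket 2 (100 us <= 750 us < 1000 us); with at
 * least one task waiting for IO on this CPU the same duration lands in
 * bucket BUCKETS/2 + 2 = 8, so the IO and non-IO cases keep separate
 * correction-factor histories.
 */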

/*
 * Return a multiplier for the exit latency that is intended
 * to take performance requirements into account.
 * The more performance critical we estimate the system
 * to be, the higher this multiplier, and thus the higher
 * the barrier to go to an expensive C state.
 */
static inline int performance_multiplier(void)
{
        int mult = 1;

        /* for higher loadavg, we are more reluctant */
        mult += 2 * get_loadavg();

        /* for IO wait tasks (per cpu!) we add 10 points each */
        mult += 10 * nr_iowait_cpu(smp_processor_id());

        return mult;
}
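
/*
 * Worked example (illustrative): with a per-cpu load average of 1.50
 * (get_loadavg() == 15) and two tasks waiting for IO on this CPU the
 * multiplier is 1 + 2 * 15 + 10 * 2 = 51.  A C-state with an exit latency
 * of 100 us is then only considered if the predicted idle time is at
 * least 51 * 100 = 5100 us.
 */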

static DEFINE_PER_CPU(struct menu_device, menu_devices);

static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);

/* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
static u64 div_round64(u64 dividend, u32 divisor)
{
        return div_u64(dividend + (divisor / 2), divisor);
}
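
/*
 * For example, div_round64(1499, 1000) == 1 while div_round64(1500, 1000)
 * == 2: adding divisor / 2 before the division turns the truncating
 * div_u64() into round-to-nearest.
 */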

/* Cancel the hrtimer if it has not fired yet */
void menu_hrtimer_cancel(void)
{
        int cpu = smp_processor_id();
        struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

        /* The timer has not fired yet */
        if (per_cpu(hrtimer_status, cpu)) {
                hrtimer_cancel(hrtmr);
                per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
        }
}
EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);

/* Callback invoked when the hrtimer fires */
static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
{
        int cpu = smp_processor_id();
        struct menu_device *data = &per_cpu(menu_devices, cpu);

        /*
         * In the general case the expected residency is much larger than
         * the deepest C-state's target residency, but the prediction logic
         * still predicted a small residency.  If this timer fires, that
         * prediction history is clearly broken, so reset the correction
         * factor.
         */
        if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
                data->correction_factor[data->bucket] = RESOLUTION * DECAY;

        per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;

        return HRTIMER_NORESTART;
}

/*
 * Try detecting repeating patterns by keeping track of the last 8
 * intervals, and checking if the standard deviation of that set
 * of points is below a threshold. If it is... then use the
 * average of these 8 points as the estimated value.
 */
static u32 get_typical_interval(struct menu_device *data)
{
        int i = 0, divisor = 0;
        uint64_t max = 0, avg = 0, stddev = 0;
        int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
        unsigned int ret = 0;

again:

        /* first calculate average and standard deviation of the past */
        max = avg = divisor = stddev = 0;
        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        avg += value;
                        divisor++;
                        if (value > max)
                                max = value;
                }
        }
        do_div(avg, divisor);

        for (i = 0; i < INTERVALS; i++) {
                int64_t value = data->intervals[i];
                if (value <= thresh) {
                        int64_t diff = value - avg;
                        stddev += diff * diff;
                }
        }
        do_div(stddev, divisor);
        stddev = int_sqrt(stddev);
        /*
         * If we have outliers to the upside in our distribution, discard
         * those by setting the threshold to exclude these outliers, then
         * calculate the average and standard deviation again. Once we get
         * down to the bottom 3/4 of our samples, stop excluding samples.
         *
         * This can deal with workloads that have long pauses interspersed
         * with sporadic activity with a bunch of short pauses.
         *
         * The typical interval is accepted when the standard deviation is
         * small in absolute terms or small compared to the average interval.
         */
        if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
                                                        || stddev <= 20) {
                data->predicted_us = avg;
                ret = 1;
                return ret;

        } else if ((divisor * 4) > INTERVALS * 3) {
                /* Exclude the max interval */
                thresh = max - 1;
                goto again;
        }

        return ret;
}
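
/*
 * Worked example (illustrative): with recorded intervals of
 * {210, 195, 205, 200, 190, 5000, 205, 195} us the first pass computes an
 * average of 800 us with a large standard deviation, so the 5000 us
 * outlier is dropped via "thresh = max - 1".  The second pass over the
 * remaining seven samples gives an average of 200 us with a standard
 * deviation of about 6 us, so 200 us is used as the prediction instead of
 * the (possibly much later) next timer event.
 */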

/**
 * menu_select - selects the next idle state to enter
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;
        int multiplier;
        struct timespec t;
        int repeat = 0, low_predicted = 0;
        int cpu = smp_processor_id();
        struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);

        if (data->needs_update) {
                menu_update(drv, dev);
                data->needs_update = 0;
        }

        data->last_state_idx = 0;
        data->exit_us = 0;

        /* Special case when user has set very strict latency requirement */
        if (unlikely(latency_req == 0))
                return 0;

        /* determine the expected residency time, round up */
        t = ktime_to_timespec(tick_nohz_get_sleep_length());
        data->expected_us =
                t.tv_sec * USEC_PER_SEC + t.tv_nsec / NSEC_PER_USEC;

        data->bucket = which_bucket(data->expected_us);

        multiplier = performance_multiplier();

        /*
         * if the correction factor is 0 (e.g. first time init or cpu
         * hotplug etc), we actually want to start out with a unity factor.
         */
        if (data->correction_factor[data->bucket] == 0)
                data->correction_factor[data->bucket] = RESOLUTION * DECAY;

        /* Make sure to round up for half microseconds */
        data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
                                         RESOLUTION * DECAY);

        repeat = get_typical_interval(data);

        /*
         * We want to default to C1 (hlt), not to busy polling
         * unless the timer is happening really really soon.
         */
        if (data->expected_us > 5 &&
            !drv->states[CPUIDLE_DRIVER_STATE_START].disabled &&
                dev->states_usage[CPUIDLE_DRIVER_STATE_START].disable == 0)
                data->last_state_idx = CPUIDLE_DRIVER_STATE_START;

        /*
         * Find the idle state with the lowest power while satisfying
         * our constraints.
         */
        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];
                struct cpuidle_state_usage *su = &dev->states_usage[i];

                if (s->disabled || su->disable)
                        continue;
                if (s->target_residency > data->predicted_us) {
                        low_predicted = 1;
                        continue;
                }
                if (s->exit_latency > latency_req)
                        continue;
                if (s->exit_latency * multiplier > data->predicted_us)
                        continue;

                data->last_state_idx = i;
                data->exit_us = s->exit_latency;
        }

        /* the deepest C-state was not chosen because of low predicted residency */
        if (low_predicted) {
                unsigned int timer_us = 0;
                unsigned int perfect_us = 0;

                /*
                 * Set a timer to detect whether this sleep is much
                 * longer than repeat mode predicted.  If the timer
                 * triggers, the code will evaluate whether to put
                 * the CPU into a deeper C-state.
                 * The timer is cancelled on CPU wakeup.
                 */
                timer_us = 2 * (data->predicted_us + MAX_DEVIATION);

                perfect_us = perfect_cstate_ms * 1000;

                if (repeat && (4 * timer_us < data->expected_us)) {
                        RCU_NONIDLE(hrtimer_start(hrtmr,
                                ns_to_ktime(1000 * timer_us),
                                HRTIMER_MODE_REL_PINNED));
                        /* In the repeat case, the menu hrtimer is started */
                        per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
                } else if (perfect_us < data->expected_us) {
                        /*
                         * The next timer is long. This could be because
                         * we did not make a useful prediction.
                         * In that case, it makes sense to re-enter
                         * into a deeper C-state after some time.
                         */
                        RCU_NONIDLE(hrtimer_start(hrtmr,
                                ns_to_ktime(1000 * timer_us),
                                HRTIMER_MODE_REL_PINNED));
                        /* In the general case, the menu hrtimer is started */
                        per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
                }
        }

        return data->last_state_idx;
}

/**
 * menu_reflect - records that data structures need update
 * @dev: the CPU
 * @index: the index of the state that was actually entered
 *
 * NOTE: it's important to be fast here because this operation will add to
 *       the overall exit latency.
 */
static void menu_reflect(struct cpuidle_device *dev, int index)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        data->last_state_idx = index;
        if (index >= 0)
                data->needs_update = 1;
}

/**
 * menu_update - attempts to guess what happened after entry
 * @drv: cpuidle driver containing state data
 * @dev: the CPU
 */
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
        struct menu_device *data = &__get_cpu_var(menu_devices);
        int last_idx = data->last_state_idx;
        unsigned int last_idle_us = cpuidle_get_last_residency(dev);
        struct cpuidle_state *target = &drv->states[last_idx];
        unsigned int measured_us;
        u64 new_factor;

        /*
         * Ugh, this idle state doesn't support residency measurements, so we
         * are basically lost in the dark.  As a compromise, assume we slept
         * for the whole expected time.
         */
        if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
                last_idle_us = data->expected_us;

        measured_us = last_idle_us;

        /*
         * We correct for the exit latency; we are assuming here that the
         * exit latency happens after the event that we're interested in.
         */
        if (measured_us > data->exit_us)
                measured_us -= data->exit_us;

        /* update our correction ratio */
        new_factor = data->correction_factor[data->bucket]
                        * (DECAY - 1) / DECAY;

        if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
                new_factor += RESOLUTION * measured_us / data->expected_us;
        else
                /*
                 * we were idle so long that we count it as a perfect
                 * prediction
                 */
                new_factor += RESOLUTION;

        /*
         * We don't want 0 as factor; we always want at least
         * a tiny bit of estimated time.
         */
        if (new_factor == 0)
                new_factor = 1;

        data->correction_factor[data->bucket] = new_factor;

        /* update the repeating-pattern data */
        data->intervals[data->interval_ptr++] = last_idle_us;
        if (data->interval_ptr >= INTERVALS)
                data->interval_ptr = 0;
}
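
/*
 * Worked example (illustrative): assume the stored correction factor is
 * the unity value RESOLUTION * DECAY = 8192, the expected idle time was
 * 1000 us and the measured idle time is 400 us.  The update computes
 *
 *     new_factor = 8192 * 7 / 8 + 1024 * 400 / 1000 = 7168 + 409 = 7577
 *
 * i.e. roughly 0.92 in real terms: a single short sleep only nudges the
 * factor, because the running average decays with weight
 * (DECAY - 1) / DECAY per observation.
 */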

/**
 * menu_enable_device - scans a CPU's states and does setup
 * @drv: cpuidle driver
 * @dev: the CPU
 */
static int menu_enable_device(struct cpuidle_driver *drv,
                                struct cpuidle_device *dev)
{
        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
        struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
        hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t->function = menu_hrtimer_notify;

        memset(data, 0, sizeof(struct menu_device));

        return 0;
}

static struct cpuidle_governor menu_governor = {
        .name =         "menu",
        .rating =       20,
        .enable =       menu_enable_device,
        .select =       menu_select,
        .reflect =      menu_reflect,
        .owner =        THIS_MODULE,
};

/**
 * init_menu - initializes the governor
 */
static int __init init_menu(void)
{
        return cpuidle_register_governor(&menu_governor);
}

/**
 * exit_menu - exits the governor
 */
static void __exit exit_menu(void)
{
        cpuidle_unregister_governor(&menu_governor);
}

MODULE_LICENSE("GPL");
module_init(init_menu);
module_exit(exit_menu);