/*
 *  drivers/cpufreq/cpufreq_ondemand.c
 *
 *  Copyright (C)  2001 Russell King
 *            (C)  2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 *                      Jun Nakajima <jun.nakajima@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/tick.h>
#include "cpufreq_governor.h"
/* On-demand governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD              (80)
#define DEF_SAMPLING_DOWN_FACTOR                (1)
#define MAX_SAMPLING_DOWN_FACTOR                (100000)
#define MICRO_FREQUENCY_UP_THRESHOLD            (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE         (10000)
#define MIN_FREQUENCY_UP_THRESHOLD              (11)
#define MAX_FREQUENCY_UP_THRESHOLD              (100)
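
/*
 * The *_UP_THRESHOLD values are CPU load percentages; the sampling rates
 * are in microseconds (MICRO_FREQUENCY_MIN_SAMPLE_RATE is 10 ms).
 */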

static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);

static struct od_ops od_ops;

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static struct cpufreq_governor cpufreq_gov_ondemand;
#endif

static unsigned int default_powersave_bias;

static void ondemand_powersave_bias_init_cpu(int cpu)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);

        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
        dbs_info->freq_lo = 0;
}
/*
 * Not all CPUs want IO time to be accounted as busy; this depends on how
 * efficient idling at a higher frequency/voltage is.
 * Pavel Machek says this is not so for various generations of AMD and old
 * Intel systems.
 * Mike Chan (android.com) claims this is also not true for ARM.
 * Because of this, enable io_is_busy by default only on CPU series known
 * to idle efficiently, and leave all others up to the user.
 */
static int should_io_be_busy(void)
{
#if defined(CONFIG_X86)
        /*
         * For Intel, Core 2 (model 15) and later have an efficient idle.
         */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
                        boot_cpu_data.x86 == 6 &&
                        boot_cpu_data.x86_model >= 15)
                return 1;
#endif
        return 0;
}

/*
 * Find the right frequency to set now, with powersave_bias enabled.
 * Returns the freq_hi to use right now, and sets freq_hi_jiffies, freq_lo
 * and freq_lo_jiffies in the per-CPU area so that alternating between the
 * two frequencies averages out to the requested one.
 */
static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
                unsigned int freq_next, unsigned int relation)
{
        unsigned int freq_req, freq_reduc, freq_avg;
        unsigned int freq_hi, freq_lo;
        unsigned int index = 0;
        unsigned int jiffies_total, jiffies_hi, jiffies_lo;
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                   policy->cpu);
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (!dbs_info->freq_table) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_next;
        }

        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
                        relation, &index);
        freq_req = dbs_info->freq_table[index].frequency;
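        /*
         * powersave_bias is in units of 0.1%: e.g. a bias of 100 lowers
         * the requested frequency by 10%.
         */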
        freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
        freq_avg = freq_req - freq_reduc;

        /* Find freq bounds for freq_avg in freq_table */
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_H, &index);
        freq_lo = dbs_info->freq_table[index].frequency;
        index = 0;
        cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg,
                        CPUFREQ_RELATION_L, &index);
        freq_hi = dbs_info->freq_table[index].frequency;

        /* Find out how long we have to be in hi and lo freqs */
        if (freq_hi == freq_lo) {
                dbs_info->freq_lo = 0;
                dbs_info->freq_lo_jiffies = 0;
                return freq_lo;
        }
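        /*
         * Split the sampling window between freq_hi and freq_lo so that
         * the time-weighted average equals freq_avg:
         *   jiffies_hi = jiffies_total * (freq_avg - freq_lo) / (freq_hi - freq_lo)
         * rounded to the nearest jiffy.  E.g. with freq_lo = 1000 MHz,
         * freq_hi = 2000 MHz and freq_avg = 1800 MHz, 80% of the window
         * is spent at freq_hi.
         */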
        jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
        jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
        jiffies_hi += ((freq_hi - freq_lo) / 2);
        jiffies_hi /= (freq_hi - freq_lo);
        jiffies_lo = jiffies_total - jiffies_hi;
        dbs_info->freq_lo = freq_lo;
        dbs_info->freq_lo_jiffies = jiffies_lo;
        dbs_info->freq_hi_jiffies = jiffies_hi;
        return freq_hi;
}

static void ondemand_powersave_bias_init(void)
{
        int i;
        for_each_online_cpu(i) {
                ondemand_powersave_bias_init_cpu(i);
        }
}

static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        if (od_tuners->powersave_bias)
                freq = od_ops.powersave_bias_target(policy, freq,
                                CPUFREQ_RELATION_H);
        else if (policy->cur == policy->max)
                return;

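        /*
         * CPUFREQ_RELATION_L picks the lowest table frequency at or above
         * the target, CPUFREQ_RELATION_H the highest at or below it; with
         * powersave_bias the target above already lies on a table entry.
         */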
        __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
                        CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}

/*
 * Every sampling_rate we check the CPU load. If it exceeds up_threshold
 * (80% by default, i.e. idle time below 20%), we jump to the maximum
 * frequency; otherwise we set a frequency proportional to the load.
 */
static void od_check_cpu(int cpu, unsigned int load)
{
        struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
        struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
        struct dbs_data *dbs_data = policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;

        dbs_info->freq_lo = 0;

        /* Check for frequency increase */
        if (load > od_tuners->up_threshold) {
                /* If switching to max speed, apply sampling_down_factor */
                if (policy->cur < policy->max)
                        dbs_info->rate_mult =
                                od_tuners->sampling_down_factor;
                dbs_freq_increase(policy, policy->max);
                return;
        } else {
                /* Calculate the next frequency proportional to load */
                unsigned int freq_next;
                freq_next = load * policy->cpuinfo.max_freq / 100;
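                /*
                 * E.g. a load of 50 on a 2 GHz CPU gives freq_next = 1 GHz;
                 * CPUFREQ_RELATION_L below then selects the lowest table
                 * frequency at or above that value.
                 */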

                /* No longer fully busy, reset rate_mult */
                dbs_info->rate_mult = 1;

                if (!od_tuners->powersave_bias) {
                        __cpufreq_driver_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                        return;
                }

                freq_next = od_ops.powersave_bias_target(policy, freq_next,
                                        CPUFREQ_RELATION_L);
                __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
        }
}

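/*
 * When powersave_bias has split the sampling window, a NORMAL_SAMPLE runs
 * at freq_hi for freq_hi_jiffies and schedules a SUB_SAMPLE, which then
 * drops to freq_lo for the remaining freq_lo_jiffies of the window.
 */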
static void od_dbs_timer(struct work_struct *work)
{
        struct od_cpu_dbs_info_s *dbs_info =
                container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
        unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
        struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
                        cpu);
        struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int delay = 0, sample_type = core_dbs_info->sample_type;
        bool modify_all = true;

        mutex_lock(&core_dbs_info->cdbs.timer_mutex);
        if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
                modify_all = false;
                goto max_delay;
        }

        /* Common NORMAL_SAMPLE setup */
        core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
        if (sample_type == OD_SUB_SAMPLE) {
                delay = core_dbs_info->freq_lo_jiffies;
                __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
                                core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
        } else {
                dbs_check_cpu(dbs_data, cpu);
                if (core_dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        core_dbs_info->sample_type = OD_SUB_SAMPLE;
                        delay = core_dbs_info->freq_hi_jiffies;
                }
        }

max_delay:
        if (!delay)
                delay = delay_for_sampling_rate(od_tuners->sampling_rate
                                * core_dbs_info->rate_mult);

        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
        mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * od_tuners->sampling_rate is not enough: the next sample is already
 * scheduled at the old interval. For example, if the original rate was
 * 1 second and the user requests 10 ms for an immediate reaction from the
 * governor, the change could take effect up to 1 second late. Thus, when
 * reducing the sampling rate, we need to make the new value effective
 * immediately by rescheduling any timer that is due later than the new
 * interval.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
                unsigned int new_rate)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        int cpu;

        od_tuners->sampling_rate = new_rate = max(new_rate,
                        dbs_data->min_sampling_rate);

        for_each_online_cpu(cpu) {
                struct cpufreq_policy *policy;
                struct od_cpu_dbs_info_s *dbs_info;
                unsigned long next_sampling, appointed_at;

                policy = cpufreq_cpu_get(cpu);
                if (!policy)
                        continue;
                if (policy->governor != &cpufreq_gov_ondemand) {
                        cpufreq_cpu_put(policy);
                        continue;
                }
                dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
                cpufreq_cpu_put(policy);

                mutex_lock(&dbs_info->cdbs.timer_mutex);

                if (!delayed_work_pending(&dbs_info->cdbs.work)) {
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        continue;
                }

                next_sampling = jiffies + usecs_to_jiffies(new_rate);
                appointed_at = dbs_info->cdbs.work.timer.expires;

                if (time_before(next_sampling, appointed_at)) {

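                        /*
                         * Drop the mutex before cancelling: the work item
                         * itself takes timer_mutex, so a synchronous cancel
                         * while holding it could deadlock.
                         */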
                        mutex_unlock(&dbs_info->cdbs.timer_mutex);
                        cancel_delayed_work_sync(&dbs_info->cdbs.work);
                        mutex_lock(&dbs_info->cdbs.timer_mutex);

                        gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
                                        usecs_to_jiffies(new_rate), true);
                }
                mutex_unlock(&dbs_info->cdbs.timer_mutex);
        }
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        update_sampling_rate(dbs_data, input);
        return count;
}

static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        unsigned int j;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;
        od_tuners->io_is_busy = !!input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                                                        j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
        }
        return count;
}

static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
                return -EINVAL;
        }

        od_tuners->up_threshold = input;
        return count;
}

static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input, j;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                return -EINVAL;
        od_tuners->sampling_down_factor = input;

        /* Reset down sampling multiplier in case it was active */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
                                j);
                dbs_info->rate_mult = 1;
        }
        return count;
}

static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
                const char *buf, size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        unsigned int j;
        int ret;

        ret = sscanf(buf, "%u", &input);
        if (ret != 1)
                return -EINVAL;

        if (input > 1)
                input = 1;

        if (input == od_tuners->ignore_nice_load) { /* nothing to do */
                return count;
        }
        od_tuners->ignore_nice_load = input;

        /* we need to re-evaluate prev_cpu_idle */
        for_each_online_cpu(j) {
                struct od_cpu_dbs_info_s *dbs_info;
                dbs_info = &per_cpu(od_cpu_dbs_info, j);
                dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
                        &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
                if (od_tuners->ignore_nice_load)
                        dbs_info->cdbs.prev_cpu_nice =
                                kcpustat_cpu(j).cpustat[CPUTIME_NICE];
        }
        return count;
}

static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
                size_t count)
{
        struct od_dbs_tuners *od_tuners = dbs_data->tuners;
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);

        if (ret != 1)
                return -EINVAL;

        if (input > 1000)
                input = 1000;

        od_tuners->powersave_bias = input;
        ondemand_powersave_bias_init();
        return count;
}

show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);

gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);

static struct attribute *dbs_attributes_gov_sys[] = {
        &sampling_rate_min_gov_sys.attr,
        &sampling_rate_gov_sys.attr,
        &up_threshold_gov_sys.attr,
        &sampling_down_factor_gov_sys.attr,
        &ignore_nice_load_gov_sys.attr,
        &powersave_bias_gov_sys.attr,
        &io_is_busy_gov_sys.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_sys = {
        .attrs = dbs_attributes_gov_sys,
        .name = "ondemand",
};

static struct attribute *dbs_attributes_gov_pol[] = {
        &sampling_rate_min_gov_pol.attr,
        &sampling_rate_gov_pol.attr,
        &up_threshold_gov_pol.attr,
        &sampling_down_factor_gov_pol.attr,
        &ignore_nice_load_gov_pol.attr,
        &powersave_bias_gov_pol.attr,
        &io_is_busy_gov_pol.attr,
        NULL
};

static struct attribute_group od_attr_group_gov_pol = {
        .attrs = dbs_attributes_gov_pol,
        .name = "ondemand",
};
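
/*
 * Example usage (paths assume the global-tunables layout; per-policy
 * builds expose the same files under each policy's cpufreq directory):
 *   echo 95  > /sys/devices/system/cpu/cpufreq/ondemand/up_threshold
 *   echo 100 > /sys/devices/system/cpu/cpufreq/ondemand/powersave_bias
 */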

/************************** sysfs end ************************/

static int od_init(struct dbs_data *dbs_data)
{
        struct od_dbs_tuners *tuners;
        u64 idle_time;
        int cpu;

        tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
        if (!tuners) {
                pr_err("%s: kzalloc failed\n", __func__);
                return -ENOMEM;
        }

        cpu = get_cpu();
        idle_time = get_cpu_idle_time_us(cpu, NULL);
        put_cpu();
        if (idle_time != -1ULL) {
                /* Idle micro accounting is supported. Use finer thresholds */
                tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
                /*
                 * In the nohz/micro-accounting case we set the minimum
                 * sampling rate to a fixed, very low value rather than
                 * deriving it from HZ. The deferrable timer may skip
                 * samples while the CPU is idle/sleeping, as intended.
                 */
                dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
        } else {
                tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

                /* For correct statistics, we need 10 ticks for each measure */
                dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
                        jiffies_to_usecs(10);
        }

        tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
        tuners->ignore_nice_load = 0;
        tuners->powersave_bias = default_powersave_bias;
        tuners->io_is_busy = should_io_be_busy();

        dbs_data->tuners = tuners;
        mutex_init(&dbs_data->mutex);
        return 0;
}

static void od_exit(struct dbs_data *dbs_data)
{
        kfree(dbs_data->tuners);
}

define_get_cpu_dbs_routines(od_cpu_dbs_info);

static struct od_ops od_ops = {
        .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
        .powersave_bias_target = generic_powersave_bias_target,
        .freq_increase = dbs_freq_increase,
};

static struct common_dbs_data od_dbs_cdata = {
        .governor = GOV_ONDEMAND,
        .attr_group_gov_sys = &od_attr_group_gov_sys,
        .attr_group_gov_pol = &od_attr_group_gov_pol,
        .get_cpu_cdbs = get_cpu_cdbs,
        .get_cpu_dbs_info_s = get_cpu_dbs_info_s,
        .gov_dbs_timer = od_dbs_timer,
        .gov_check_cpu = od_check_cpu,
        .gov_ops = &od_ops,
        .init = od_init,
        .exit = od_exit,
};

static void od_set_powersave_bias(unsigned int powersave_bias)
{
        struct cpufreq_policy *policy;
        struct dbs_data *dbs_data;
        struct od_dbs_tuners *od_tuners;
        unsigned int cpu;
        cpumask_t done;

        default_powersave_bias = powersave_bias;
        cpumask_clear(&done);

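        /*
         * Visit each policy once: CPUs covered by an already-visited
         * policy are marked in 'done' and skipped.
         */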
        get_online_cpus();
        for_each_online_cpu(cpu) {
                if (cpumask_test_cpu(cpu, &done))
                        continue;

                policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
                if (!policy)
                        continue;

                cpumask_or(&done, &done, policy->cpus);

                if (policy->governor != &cpufreq_gov_ondemand)
                        continue;

                dbs_data = policy->governor_data;
                od_tuners = dbs_data->tuners;
                od_tuners->powersave_bias = default_powersave_bias;
        }
        put_online_cpus();
}

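/*
 * Allow another module to supply its own powersave_bias target routine;
 * the generic implementation is restored by
 * od_unregister_powersave_bias_handler().
 */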
void od_register_powersave_bias_handler(unsigned int (*f)
                (struct cpufreq_policy *, unsigned int, unsigned int),
                unsigned int powersave_bias)
{
        od_ops.powersave_bias_target = f;
        od_set_powersave_bias(powersave_bias);
}
EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);

void od_unregister_powersave_bias_handler(void)
{
        od_ops.powersave_bias_target = generic_powersave_bias_target;
        od_set_powersave_bias(0);
}
EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);

static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
                unsigned int event)
{
        return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
static
#endif
struct cpufreq_governor cpufreq_gov_ondemand = {
        .name                   = "ondemand",
        .governor               = od_cpufreq_governor_dbs,
        .max_transition_latency = TRANSITION_LATENCY_LIMIT,
        .owner                  = THIS_MODULE,
};

static int __init cpufreq_gov_dbs_init(void)
{
        return cpufreq_register_governor(&cpufreq_gov_ondemand);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
        cpufreq_unregister_governor(&cpufreq_gov_ondemand);
}

MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
MODULE_AUTHOR("Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>");
MODULE_DESCRIPTION("'cpufreq_ondemand' - A dynamic cpufreq governor for "
        "Low Latency Frequency Transition capable processors");
MODULE_LICENSE("GPL");

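/*
 * When ondemand is the default governor, register it early in boot via
 * fs_initcall() so it is ready by the time cpufreq drivers initialize;
 * otherwise use normal module initialization.
 */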
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);