// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *      Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)                     \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
                if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

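/*
 * Illustrative sketch (not from this file): walking the active policies,
 * as the suspend/resume code below does. Callers are assumed to hold
 * whatever locking the context requires; the list itself is modified
 * under cpufreq_driver_lock.
 *
 *        struct cpufreq_policy *policy;
 *
 *        for_each_active_policy(policy)
 *                pr_debug("active policy for CPU%u\n", policy->cpu);
 */
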
/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static int cpufreq_start_governor(struct cpufreq_policy *policy);
static void cpufreq_stop_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        u64 idle_time;
        u64 cur_wall_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

        busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
        busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = div_u64(cur_wall_time, NSEC_PER_USEC);

        return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
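
/*
 * Illustrative sketch (not part of this file): how a sampling governor in
 * the style of ondemand derives a load estimate from two successive calls,
 * using the idle and wall times (both in microseconds) returned above.
 *
 *        u64 prev_wall, prev_idle, wall, idle;
 *        unsigned int load;
 *
 *        prev_idle = get_cpu_idle_time(cpu, &prev_wall, io_busy);
 *        ...  (one sampling interval elapses)
 *        idle = get_cpu_idle_time(cpu, &wall, io_busy);
 *        load = div64_u64(100 * ((wall - prev_wall) - (idle - prev_idle)),
 *                         wall - prev_wall);
 */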

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the frequency table passed
 * - set the policy's transition latency
 * - fill policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        policy->freq_table = table;
        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
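
/*
 * Illustrative sketch (hypothetical driver, not from this file): a minimal
 * ->init() callback for a platform where all CPUs share one clock, built
 * on cpufreq_generic_init(). "my_freq_table" and the 300 us latency are
 * made-up placeholders.
 *
 *        static int my_cpufreq_init(struct cpufreq_policy *policy)
 *        {
 *                cpufreq_generic_init(policy, my_freq_table, 300 * 1000);
 *                return 0;
 *        }
 */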

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated with cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
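
/*
 * Illustrative sketch (not from this file): the canonical get/put pairing.
 * Every successful cpufreq_cpu_get() must be balanced by cpufreq_cpu_put()
 * once the caller is done with the policy.
 *
 *        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *        if (policy) {
 *                pr_debug("CPU%u max freq: %u kHz\n", cpu, policy->max);
 *                cpufreq_cpu_put(policy);
 *        }
 */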

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
        if (WARN_ON(!policy))
                return;

        lockdep_assert_held(&policy->rwsem);

        up_write(&policy->rwsem);

        cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return NULL;

        down_write(&policy->rwsem);

        if (policy_is_inactive(policy)) {
                cpufreq_cpu_release(policy);
                return NULL;
        }

        return policy;
}
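
/*
 * Illustrative sketch (not from this file): acquire/release is the variant
 * to use when the caller needs to modify the policy, since it also takes
 * policy->rwsem for writing.
 *
 *        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *        if (policy) {
 *                ...  (update the policy under the rwsem)
 *                cpufreq_cpu_release(policy);
 *        }
 */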

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: contains details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                                      struct cpufreq_freqs *freqs,
                                      unsigned int state)
{
        int cpu;

        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->policy = policy;
        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {
        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (policy->cur && policy->cur != freqs->old) {
                        pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                 freqs->old, policy->cur);
                        freqs->old = policy->cur;
                }

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_PRECHANGE, freqs);

                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
                         cpumask_pr_args(policy->cpus));

                for_each_cpu(cpu, policy->cpus)
                        trace_cpu_frequency(freqs->new, cpu);

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_POSTCHANGE, freqs);

                cpufreq_stats_record_transition(policy, freqs->new);
                policy->cur = freqs->new;
        }
}

/* Do post notifications when there is a chance that the transition failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{
        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (WARN_ON(!policy->transition_ongoing))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
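
/*
 * Illustrative sketch (hypothetical driver, not from this file): a driver
 * that implements ->target() handles transition notifications itself by
 * bracketing the hardware change with the begin/end helpers above (the
 * core brackets ->target_index() calls on its own). "my_hw_set_freq" is a
 * made-up placeholder.
 *
 *        static int my_target(struct cpufreq_policy *policy,
 *                             unsigned int target_freq, unsigned int relation)
 *        {
 *                struct cpufreq_freqs freqs = {
 *                        .old = policy->cur,
 *                        .new = target_freq,
 *                };
 *                int ret;
 *
 *                cpufreq_freq_transition_begin(policy, &freqs);
 *                ret = my_hw_set_freq(target_freq);
 *                cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 *
 *                return ret;
 *        }
 */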

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
        struct notifier_block *nb;

        pr_info("Registered transition notifiers:\n");

        mutex_lock(&cpufreq_transition_notifier_list.mutex);

        for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
                pr_info("%pS\n", nb->notifier_call);

        mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
        lockdep_assert_held(&policy->rwsem);

        if (!policy->fast_switch_possible)
                return;

        mutex_lock(&cpufreq_fast_switch_lock);
        if (cpufreq_fast_switch_count >= 0) {
                cpufreq_fast_switch_count++;
                policy->fast_switch_enabled = true;
        } else {
                pr_warn("CPU%u: Fast frequency switching not enabled\n",
                        policy->cpu);
                cpufreq_list_transition_notifiers();
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
        mutex_lock(&cpufreq_fast_switch_lock);
        if (policy->fast_switch_enabled) {
                policy->fast_switch_enabled = false;
                if (!WARN_ON(cpufreq_fast_switch_count <= 0))
                        cpufreq_fast_switch_count--;
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: cpufreq policy the target frequency applies to.
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);
        policy->cached_target_freq = target_freq;

        if (cpufreq_driver->target_index) {
                int idx;

                idx = cpufreq_frequency_table_target(policy, target_freq,
                                                     CPUFREQ_RELATION_L);
                policy->cached_resolved_idx = idx;
                return policy->freq_table[idx].frequency;
        }

        if (cpufreq_driver->resolve_freq)
                return cpufreq_driver->resolve_freq(policy, target_freq);

        return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
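
/*
 * Illustrative sketch (not from this file): a governor's frequency
 * selection path, in the style of schedutil, resolves the raw target to a
 * driver-supported value before asking the driver to switch.
 *
 *        unsigned int next_freq;
 *
 *        next_freq = cpufreq_driver_resolve_freq(policy, raw_target_freq);
 *        if (policy->fast_switch_enabled)
 *                cpufreq_driver_fast_switch(policy, next_freq);
 */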

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
        unsigned int latency;

        if (policy->transition_delay_us)
                return policy->transition_delay_us;

        latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
        if (latency) {
                /*
                 * For platforms that can change the frequency very fast (< 10
                 * us), scaling the latency by LATENCY_MULTIPLIER (below) gives
                 * a decent transition delay. But for platforms where
                 * transition_latency is in milliseconds, it ends up giving
                 * unrealistic values.
                 *
                 * Cap the default transition delay to 10 ms, which seems to be
                 * a reasonable amount of time after which we should reevaluate
                 * the frequency.
                 */
                return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
        }

        return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
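
/*
 * Illustrative sketch (not from this file): governors use the helper above
 * to rate-limit their frequency updates; schedutil, for instance, derives
 * its minimum interval between updates from it.
 *
 *        u64 delay_ns = NSEC_PER_USEC *
 *                       cpufreq_policy_transition_delay_us(policy);
 */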

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

static int cpufreq_parse_policy(char *str_governor,
                                struct cpufreq_policy *policy)
{
        if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
                return 0;
        }
        if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) {
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
                return 0;
        }
        return -EINVAL;
}

/*
 * cpufreq_parse_governor - parse a governor string; only used when the
 * driver has target callbacks, i.e. has_target() is true.
 */
static int cpufreq_parse_governor(char *str_governor,
                                  struct cpufreq_policy *policy)
{
        struct cpufreq_governor *t;

        mutex_lock(&cpufreq_governor_mutex);

        t = find_governor(str_governor);
        if (!t) {
                int ret;

                mutex_unlock(&cpufreq_governor_mutex);

                ret = request_module("cpufreq_%s", str_governor);
                if (ret)
                        return -EINVAL;

                mutex_lock(&cpufreq_governor_mutex);

                t = find_governor(str_governor);
        }
        if (t && !try_module_get(t->owner))
                t = NULL;

        mutex_unlock(&cpufreq_governor_mutex);

        if (t) {
                policy->governor = t;
                return 0;
        }

        return -EINVAL;
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from the CPU's cpufreq policy; the object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
        return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;
        unsigned int freq;

        freq = arch_freq_get_on_cpu(policy->cpu);
        if (freq)
                ret = sprintf(buf, "%u\n", freq);
        else if (cpufreq_driver && cpufreq_driver->setpolicy &&
                        cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned long val;                                              \
        int ret;                                                        \
                                                                        \
        ret = sscanf(buf, "%lu", &val);                                 \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
        return ret >= 0 ? count : ret;                                  \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (cur_freq)
                return sprintf(buf, "%u\n", cur_freq);

        return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        int ret;
        char    str_governor[16];
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_driver->setpolicy) {
                if (cpufreq_parse_policy(str_governor, &new_policy))
                        return -EINVAL;
        } else {
                if (cpufreq_parse_governor(str_governor, &new_policy))
                        return -EINVAL;
        }

        ret = cpufreq_set_policy(policy, &new_policy);

        if (new_policy.governor)
                module_put(new_policy.governor->owner);

        return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        goto out;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;

        ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
        if (!ret)
                return sprintf(buf, "%u\n", limit);
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        down_read(&policy->rwsem);
        ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        /*
         * cpus_read_trylock() is used here to work around a circular lock
         * dependency problem with respect to cpufreq_register_driver().
         */
        if (!cpus_read_trylock())
                return -EBUSY;

        if (cpu_online(policy->cpu)) {
                down_write(&policy->rwsem);
                ret = fattr->store(policy, buf, count);
                up_write(&policy->rwsem);
        }

        cpus_read_unlock();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);

        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (unlikely(!dev))
                return;

        if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
                return;

        dev_dbg(dev, "%s: Adding symlink\n", __func__);
        if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
                dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
                                   struct device *dev)
{
        dev_dbg(dev, "%s: Removing symlink\n", __func__);
        sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return 0;
}

__weak struct cpufreq_governor *cpufreq_default_governor(void)
{
        return NULL;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL, *def_gov = NULL;
        struct cpufreq_policy new_policy;

        memcpy(&new_policy, policy, sizeof(*policy));

        def_gov = cpufreq_default_governor();

        if (has_target()) {
                /*
                 * Update the governor of new_policy to the governor used
                 * before hotplug.
                 */
                gov = find_governor(policy->last_governor);
                if (gov) {
                        /*
                         * Use gov->name here: policy->governor may still be
                         * NULL at this point.
                         */
                        pr_debug("Restoring governor %s for cpu %d\n",
                                 gov->name, policy->cpu);
                } else {
                        if (!def_gov)
                                return -ENODATA;
                        gov = def_gov;
                }
                new_policy.governor = gov;
        } else {
                /* Use the default policy if there is no last_policy. */
                if (policy->last_policy) {
                        new_policy.policy = policy->last_policy;
                } else {
                        if (!def_gov)
                                return -ENODATA;
                        cpufreq_parse_policy(def_gov->name, &new_policy);
                }
        }

        return cpufreq_set_policy(policy, &new_policy);
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = cpufreq_start_governor(policy);
                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }
        up_write(&policy->rwsem);
        return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
        struct cpufreq_policy new_policy;

        if (!policy_is_inactive(policy)) {
                new_policy = *policy;
                pr_debug("updating policy for CPU %u\n", policy->cpu);

                cpufreq_set_policy(policy, &new_policy);
        }
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);

        pr_debug("handle_update for cpu %u called\n", policy->cpu);
        down_write(&policy->rwsem);
        refresh_frequency_limits(policy);
        up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

        schedule_work(&policy->update);
        return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

        schedule_work(&policy->update);
        return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        down_write(&policy->rwsem);
        cpufreq_stats_free_table(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        struct device *dev = get_cpu_device(cpu);
        int ret;

        if (!dev)
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
                /*
                 * The entire policy object will be freed below, but the extra
                 * memory allocated for the kobject name needs to be freed by
                 * releasing the kobject.
                 */
                kobject_put(&policy->kobj);
                goto err_free_real_cpus;
        }

        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;

        ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
                                      DEV_PM_QOS_MIN_FREQUENCY);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }

        ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
                                      DEV_PM_QOS_MAX_FREQUENCY);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_min_qos_notifier;
        }

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_min_qos_notifier:
        dev_pm_qos_remove_notifier(dev, &policy->nb_min,
                                   DEV_PM_QOS_MIN_FREQUENCY);
err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
        free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        struct device *dev = get_cpu_device(policy->cpu);
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        dev_pm_qos_remove_notifier(dev, &policy->nb_max,
                                   DEV_PM_QOS_MAX_FREQUENCY);
        dev_pm_qos_remove_notifier(dev, &policy->nb_min,
                                   DEV_PM_QOS_MIN_FREQUENCY);
        dev_pm_qos_remove_request(policy->max_freq_req);
        dev_pm_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);

        cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy.  Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        if (!new_policy && cpufreq_driver->online) {
                ret = cpufreq_driver->online(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_exit_policy;
                }

                /* Recover policy->cpus using related_cpus */
                cpumask_copy(policy->cpus, policy->related_cpus);
        } else {
                cpumask_copy(policy->cpus, cpumask_of(cpu));

                /*
                 * Call the driver. From then on the driver must be able
                 * to accept all calls to ->verify and ->setpolicy for this CPU.
                 */
                ret = cpufreq_driver->init(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_free_policy;
                }

                ret = cpufreq_table_validate_and_sort(policy);
                if (ret)
                        goto out_exit_policy;

                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
        }

        down_write(&policy->rwsem);
        /*
         * The affected CPUs must always be the ones that are online. We
         * aren't managing offline CPUs here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                struct device *dev = get_cpu_device(cpu);

                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
                }

                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
                                               GFP_KERNEL);
                if (!policy->min_freq_req)
                        goto out_destroy_policy;

                ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
                                             DEV_PM_QOS_MIN_FREQUENCY,
                                             policy->min);
                if (ret < 0) {
                        /*
                         * So we don't call dev_pm_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;

                        dev_err(dev, "Failed to add min-freq constraint (%d)\n",
                                ret);
                        goto out_destroy_policy;
                }

                /*
                 * This must be initialized right here to avoid calling
                 * dev_pm_qos_remove_request() on an uninitialized request in
                 * case of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;

                ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
                                             DEV_PM_QOS_MAX_FREQUENCY,
                                             policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
                        dev_err(dev, "Failed to add max-freq constraint (%d)\n",
                                ret);
                        goto out_destroy_policy;
                }
        }

        if (cpufreq_driver->get && has_target()) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_destroy_policy;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table present with the cpufreq core. In such cases
         * the CPU might be unstable if it has to run at that frequency for a
         * long duration, so it is better to set it to a frequency which is
         * specified in the freq-table. Running at an unlisted frequency also
         * makes cpufreq stats inconsistent, as cpufreq-stats would fail to
         * register because the current frequency of the CPU isn't found in
         * the freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest of the
         * table as 'cur' is initialized to zero).
         *
         * We are passing target-freq as "policy->cur - 1" otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur will be
         * equal to target-freq.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at an unknown frequency? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u kHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here within a few seconds after boot does
                         * not mean that the system will remain stable at the
                         * "unknown" frequency for a longer duration. Hence,
                         * a BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u kHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_destroy_policy;

                cpufreq_stats_create_table(policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                goto out_destroy_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for handling stuff after policy is ready */
        if (cpufreq_driver->ready)
                cpufreq_driver->ready(policy);

        if (cpufreq_thermal_control_enabled(cpufreq_driver))
                policy->cdev = of_cpufreq_cooling_register(policy);

        pr_debug("initialization complete\n");

        return 0;

out_destroy_policy:
        for_each_cpu(j, policy->real_cpus)
                remove_cpu_dev_symlink(policy, get_cpu_device(j));

        up_write(&policy->rwsem);

out_exit_policy:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

out_free_policy:
        cpufreq_policy_free(policy);
        return ret;
}
1492
1493/**
1494 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1495 * @dev: CPU device.
1496 * @sif: Subsystem interface structure pointer (not used)
1497 */
1498static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1499{
1500        struct cpufreq_policy *policy;
1501        unsigned cpu = dev->id;
1502        int ret;
1503
1504        dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1505
1506        if (cpu_online(cpu)) {
1507                ret = cpufreq_online(cpu);
1508                if (ret)
1509                        return ret;
1510        }
1511
1512        /* Create sysfs link on CPU registration */
1513        policy = per_cpu(cpufreq_cpu_data, cpu);
1514        if (policy)
1515                add_cpu_dev_symlink(policy, cpu);
1516
1517        return 0;
1518}
1519
1520static int cpufreq_offline(unsigned int cpu)
1521{
1522        struct cpufreq_policy *policy;
1523        int ret;
1524
1525        pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1526
1527        policy = cpufreq_cpu_get_raw(cpu);
1528        if (!policy) {
1529                pr_debug("%s: No cpu_data found\n", __func__);
1530                return 0;
1531        }
1532
1533        down_write(&policy->rwsem);
1534        if (has_target())
1535                cpufreq_stop_governor(policy);
1536
1537        cpumask_clear_cpu(cpu, policy->cpus);
1538
1539        if (policy_is_inactive(policy)) {
1540                if (has_target())
1541                        strncpy(policy->last_governor, policy->governor->name,
1542                                CPUFREQ_NAME_LEN);
1543                else
1544                        policy->last_policy = policy->policy;
1545        } else if (cpu == policy->cpu) {
1546                /* Nominate new CPU */
1547                policy->cpu = cpumask_any(policy->cpus);
1548        }
1549
1550        /* Start governor again for active policy */
1551        if (!policy_is_inactive(policy)) {
1552                if (has_target()) {
1553                        ret = cpufreq_start_governor(policy);
1554                        if (ret)
1555                                pr_err("%s: Failed to start governor\n", __func__);
1556                }
1557
1558                goto unlock;
1559        }
1560
1561        if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1562                cpufreq_cooling_unregister(policy->cdev);
1563                policy->cdev = NULL;
1564        }
1565
1566        if (cpufreq_driver->stop_cpu)
1567                cpufreq_driver->stop_cpu(policy);
1568
1569        if (has_target())
1570                cpufreq_exit_governor(policy);
1571
1572        /*
1573         * Perform the ->offline() during light-weight tear-down, as
1574         * that allows fast recovery when the CPU comes back.
1575         */
1576        if (cpufreq_driver->offline) {
1577                cpufreq_driver->offline(policy);
1578        } else if (cpufreq_driver->exit) {
1579                cpufreq_driver->exit(policy);
1580                policy->freq_table = NULL;
1581        }
1582
1583unlock:
1584        up_write(&policy->rwsem);
1585        return 0;
1586}
1587
1588/**
1589 * cpufreq_remove_dev - remove a CPU device
1590 *
1591 * Removes the cpufreq interface for a CPU device.
1592 */
1593static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1594{
1595        unsigned int cpu = dev->id;
1596        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1597
1598        if (!policy)
1599                return;
1600
1601        if (cpu_online(cpu))
1602                cpufreq_offline(cpu);
1603
1604        cpumask_clear_cpu(cpu, policy->real_cpus);
1605        remove_cpu_dev_symlink(policy, dev);
1606
1607        if (cpumask_empty(policy->real_cpus)) {
1608                /* We did light-weight exit earlier, do full tear down now */
1609                if (cpufreq_driver->offline)
1610                        cpufreq_driver->exit(policy);
1611
1612                cpufreq_policy_free(policy);
1613        }
1614}
1615
1616/**
1617 *      cpufreq_out_of_sync - If the actual and saved CPU frequencies
1618 *      differ, we're in deep trouble.
1619 *      @policy: policy managing CPUs
1620 *      @new_freq: CPU frequency the CPU actually runs at
1621 *
1622 *      We adjust to the current frequency first, and need to clean up later.
1623 *      So either call cpufreq_update_policy() or schedule handle_update().
1624 */
1625static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1626                                unsigned int new_freq)
1627{
1628        struct cpufreq_freqs freqs;
1629
1630        pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core think it is %u, but it is %u kHz\n",
1631                 policy->cur, new_freq);
1632
1633        freqs.old = policy->cur;
1634        freqs.new = new_freq;
1635
1636        cpufreq_freq_transition_begin(policy, &freqs);
1637        cpufreq_freq_transition_end(policy, &freqs, 0);
1638}
1639
1640static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1641{
1642        unsigned int new_freq;
1643
1644        new_freq = cpufreq_driver->get(policy->cpu);
1645        if (!new_freq)
1646                return 0;
1647
1648        /*
1649         * If fast frequency switching is used with the given policy, the check
1650         * against policy->cur is pointless, so skip it in that case.
1651         */
1652        if (policy->fast_switch_enabled || !has_target())
1653                return new_freq;
1654
1655        if (policy->cur != new_freq) {
1656                cpufreq_out_of_sync(policy, new_freq);
1657                if (update)
1658                        schedule_work(&policy->update);
1659        }
1660
1661        return new_freq;
1662}
1663
1664/**
1665 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1666 * @cpu: CPU number
1667 *
1668 * This is the last known frequency, without actually reading it from the
1669 * driver. The return value is the same as shown in scaling_cur_freq in sysfs.
1670 */
1671unsigned int cpufreq_quick_get(unsigned int cpu)
1672{
1673        struct cpufreq_policy *policy;
1674        unsigned int ret_freq = 0;
1675        unsigned long flags;
1676
1677        read_lock_irqsave(&cpufreq_driver_lock, flags);
1678
1679        if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1680                ret_freq = cpufreq_driver->get(cpu);
1681                read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1682                return ret_freq;
1683        }
1684
1685        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1686
1687        policy = cpufreq_cpu_get(cpu);
1688        if (policy) {
1689                ret_freq = policy->cur;
1690                cpufreq_cpu_put(policy);
1691        }
1692
1693        return ret_freq;
1694}
1695EXPORT_SYMBOL(cpufreq_quick_get);
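
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use cpufreq_quick_get(). "my_show_freq" is a hypothetical helper and the
 * CPU number 0 is an arbitrary example.
 */
#if 0
static void my_show_freq(void)
{
	unsigned int khz = cpufreq_quick_get(0);	/* last known freq in kHz */

	if (khz)
		pr_info("CPU0 last known frequency: %u kHz\n", khz);
	else
		pr_info("CPU0 has no cpufreq policy\n");
}
#endif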
1696
1697/**
1698 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1699 * @cpu: CPU number
1700 *
1701 * Just return the max possible frequency for a given CPU.
1702 */
1703unsigned int cpufreq_quick_get_max(unsigned int cpu)
1704{
1705        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1706        unsigned int ret_freq = 0;
1707
1708        if (policy) {
1709                ret_freq = policy->max;
1710                cpufreq_cpu_put(policy);
1711        }
1712
1713        return ret_freq;
1714}
1715EXPORT_SYMBOL(cpufreq_quick_get_max);
1716
1717static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1718{
1719        if (unlikely(policy_is_inactive(policy)))
1720                return 0;
1721
1722        return cpufreq_verify_current_freq(policy, true);
1723}
1724
1725/**
1726 * cpufreq_get - get the current CPU frequency (in kHz)
1727 * @cpu: CPU number
1728 *
1729 * Get the current frequency of the CPU.
1730 */
1731unsigned int cpufreq_get(unsigned int cpu)
1732{
1733        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1734        unsigned int ret_freq = 0;
1735
1736        if (policy) {
1737                down_read(&policy->rwsem);
1738                if (cpufreq_driver->get)
1739                        ret_freq = __cpufreq_get(policy);
1740                up_read(&policy->rwsem);
1741
1742                cpufreq_cpu_put(policy);
1743        }
1744
1745        return ret_freq;
1746}
1747EXPORT_SYMBOL(cpufreq_get);
1748
1749static struct subsys_interface cpufreq_interface = {
1750        .name           = "cpufreq",
1751        .subsys         = &cpu_subsys,
1752        .add_dev        = cpufreq_add_dev,
1753        .remove_dev     = cpufreq_remove_dev,
1754};
1755
1756/*
1757 * In case the platform wants some specific frequency to be configured
1758 * during suspend.
1759 */
1760int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1761{
1762        int ret;
1763
1764        if (!policy->suspend_freq) {
1765                pr_debug("%s: suspend_freq not defined\n", __func__);
1766                return 0;
1767        }
1768
1769        pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1770                        policy->suspend_freq);
1771
1772        ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1773                        CPUFREQ_RELATION_H);
1774        if (ret)
1775                pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1776                                __func__, policy->suspend_freq, ret);
1777
1778        return ret;
1779}
1780EXPORT_SYMBOL(cpufreq_generic_suspend);
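
/*
 * Illustrative sketch (hypothetical driver code): a platform driver opts in
 * to cpufreq_generic_suspend() by setting policy->suspend_freq from its
 * ->init() callback and pointing ->suspend at the generic helper. The
 * "my_*" names and the 396000 kHz value are assumptions for the example.
 */
#if 0
static int my_cpufreq_init(struct cpufreq_policy *policy)
{
	/* ... set up policy->freq_table, clocks, etc. ... */
	policy->suspend_freq = 396000;	/* kHz; platform-specific safe freq */
	return 0;
}

static struct cpufreq_driver my_cpufreq_driver = {
	.name		= "my-cpufreq",
	.init		= my_cpufreq_init,
	.suspend	= cpufreq_generic_suspend,
	/* .verify, .target_index, ... are also required for registration */
};
#endif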
1781
1782/**
1783 * cpufreq_suspend() - Suspend CPUFreq governors
1784 *
1785 * Called during system-wide Suspend/Hibernate cycles to suspend governors,
1786 * as some platforms can't change frequency after this point in the suspend
1787 * cycle: some of the devices (e.g. i2c, regulators) used for changing the
1788 * frequency are suspended soon after this point.
1789 */
1790void cpufreq_suspend(void)
1791{
1792        struct cpufreq_policy *policy;
1793
1794        if (!cpufreq_driver)
1795                return;
1796
1797        if (!has_target() && !cpufreq_driver->suspend)
1798                goto suspend;
1799
1800        pr_debug("%s: Suspending Governors\n", __func__);
1801
1802        for_each_active_policy(policy) {
1803                if (has_target()) {
1804                        down_write(&policy->rwsem);
1805                        cpufreq_stop_governor(policy);
1806                        up_write(&policy->rwsem);
1807                }
1808
1809                if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1810                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
1811                                policy);
1812        }
1813
1814suspend:
1815        cpufreq_suspended = true;
1816}
1817
1818/**
1819 * cpufreq_resume() - Resume CPUFreq governors
1820 *
1821 * Called during system-wide Suspend/Hibernate cycles to resume governors that
1822 * were suspended by cpufreq_suspend().
1823 */
1824void cpufreq_resume(void)
1825{
1826        struct cpufreq_policy *policy;
1827        int ret;
1828
1829        if (!cpufreq_driver)
1830                return;
1831
1832        if (unlikely(!cpufreq_suspended))
1833                return;
1834
1835        cpufreq_suspended = false;
1836
1837        if (!has_target() && !cpufreq_driver->resume)
1838                return;
1839
1840        pr_debug("%s: Resuming Governors\n", __func__);
1841
1842        for_each_active_policy(policy) {
1843                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1844                        pr_err("%s: Failed to resume driver: %p\n", __func__,
1845                                policy);
1846                } else if (has_target()) {
1847                        down_write(&policy->rwsem);
1848                        ret = cpufreq_start_governor(policy);
1849                        up_write(&policy->rwsem);
1850
1851                        if (ret)
1852                                pr_err("%s: Failed to start governor for policy: %p\n",
1853                                       __func__, policy);
1854                }
1855        }
1856}
1857
1858/**
1859 *      cpufreq_get_current_driver - return current driver's name
1860 *
1861 *      Return the name string of the currently loaded cpufreq driver
1862 *      or NULL, if none.
1863 */
1864const char *cpufreq_get_current_driver(void)
1865{
1866        if (cpufreq_driver)
1867                return cpufreq_driver->name;
1868
1869        return NULL;
1870}
1871EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1872
1873/**
1874 *      cpufreq_get_driver_data - return current driver data
1875 *
1876 *      Return the private data of the currently loaded cpufreq
1877 *      driver, or NULL if no cpufreq driver is loaded.
1878 */
1879void *cpufreq_get_driver_data(void)
1880{
1881        if (cpufreq_driver)
1882                return cpufreq_driver->driver_data;
1883
1884        return NULL;
1885}
1886EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1887
1888/*********************************************************************
1889 *                     NOTIFIER LISTS INTERFACE                      *
1890 *********************************************************************/
1891
1892/**
1893 *      cpufreq_register_notifier - register a driver with cpufreq
1894 *      @nb: notifier function to register
1895 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1896 *
1897 *      Add a driver to one of two lists: either a list of drivers that
1898 *      are notified about clock rate changes (once before and once after
1899 *      the transition), or a list of drivers that are notified about
1900 *      changes in cpufreq policy.
1901 *
1902 *      This function may sleep, and has the same return conditions as
1903 *      blocking_notifier_chain_register.
1904 */
1905int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1906{
1907        int ret;
1908
1909        if (cpufreq_disabled())
1910                return -EINVAL;
1911
1912        switch (list) {
1913        case CPUFREQ_TRANSITION_NOTIFIER:
1914                mutex_lock(&cpufreq_fast_switch_lock);
1915
1916                if (cpufreq_fast_switch_count > 0) {
1917                        mutex_unlock(&cpufreq_fast_switch_lock);
1918                        return -EBUSY;
1919                }
1920                ret = srcu_notifier_chain_register(
1921                                &cpufreq_transition_notifier_list, nb);
1922                if (!ret)
1923                        cpufreq_fast_switch_count--;
1924
1925                mutex_unlock(&cpufreq_fast_switch_lock);
1926                break;
1927        case CPUFREQ_POLICY_NOTIFIER:
1928                ret = blocking_notifier_chain_register(
1929                                &cpufreq_policy_notifier_list, nb);
1930                break;
1931        default:
1932                ret = -EINVAL;
1933        }
1934
1935        return ret;
1936}
1937EXPORT_SYMBOL(cpufreq_register_notifier);
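
/*
 * Illustrative sketch (hypothetical module code): registering a transition
 * notifier. Note the interaction with fast switching visible in the -EBUSY
 * path above. "my_transition_cb" and "my_transition_nb" are example names.
 */
#if 0
static int my_transition_cb(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (action == CPUFREQ_POSTCHANGE)
		pr_info("frequency changed: %u -> %u kHz\n",
			freqs->old, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block my_transition_nb = {
	.notifier_call = my_transition_cb,
};

static int __init my_module_init(void)
{
	return cpufreq_register_notifier(&my_transition_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
#endif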
1938
1939/**
1940 *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1941 *      @nb: notifier block to be unregistered
1942 *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1943 *
1944 *      Remove a driver from the CPU frequency notifier list.
1945 *
1946 *      This function may sleep, and has the same return conditions as
1947 *      blocking_notifier_chain_unregister.
1948 */
1949int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1950{
1951        int ret;
1952
1953        if (cpufreq_disabled())
1954                return -EINVAL;
1955
1956        switch (list) {
1957        case CPUFREQ_TRANSITION_NOTIFIER:
1958                mutex_lock(&cpufreq_fast_switch_lock);
1959
1960                ret = srcu_notifier_chain_unregister(
1961                                &cpufreq_transition_notifier_list, nb);
1962                if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1963                        cpufreq_fast_switch_count++;
1964
1965                mutex_unlock(&cpufreq_fast_switch_lock);
1966                break;
1967        case CPUFREQ_POLICY_NOTIFIER:
1968                ret = blocking_notifier_chain_unregister(
1969                                &cpufreq_policy_notifier_list, nb);
1970                break;
1971        default:
1972                ret = -EINVAL;
1973        }
1974
1975        return ret;
1976}
1977EXPORT_SYMBOL(cpufreq_unregister_notifier);
1978
1979
1980/*********************************************************************
1981 *                              GOVERNORS                            *
1982 *********************************************************************/
1983
1984/**
1985 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
1986 * @policy: cpufreq policy to switch the frequency for.
1987 * @target_freq: New frequency to set (may be approximate).
1988 *
1989 * Carry out a fast frequency switch without sleeping.
1990 *
1991 * The driver's ->fast_switch() callback invoked by this function must be
1992 * suitable for being called from within RCU-sched read-side critical sections
1993 * and it is expected to select the minimum available frequency greater than or
1994 * equal to @target_freq (CPUFREQ_RELATION_L).
1995 *
1996 * This function must not be called if policy->fast_switch_enabled is unset.
1997 *
1998 * Governors calling this function must guarantee that it will never be invoked
1999 * twice in parallel for the same policy and that it will never be called in
2000 * parallel with either ->target() or ->target_index() for the same policy.
2001 *
2002 * Returns the actual frequency set for the CPU.
2003 *
2004 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2005 * error condition, the hardware configuration must be preserved.
2006 */
2007unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2008                                        unsigned int target_freq)
2009{
2010        target_freq = clamp_val(target_freq, policy->min, policy->max);
2011
2012        return cpufreq_driver->fast_switch(policy, target_freq);
2013}
2014EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
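
/*
 * Illustrative sketch (hypothetical governor code, modeled loosely on what
 * the schedutil governor does): calling the fast-switch path from scheduler
 * context. A zero return means the driver failed and the hardware state was
 * left unchanged.
 */
#if 0
static void my_gov_fast_update(struct cpufreq_policy *policy,
			       unsigned int target_khz)
{
	unsigned int freq;

	if (!policy->fast_switch_enabled)
		return;

	freq = cpufreq_driver_fast_switch(policy, target_khz);
	if (!freq)	/* error; hardware configuration preserved */
		return;

	policy->cur = freq;
}
#endif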
2015
2016/* Must set freqs->new to intermediate frequency */
2017static int __target_intermediate(struct cpufreq_policy *policy,
2018                                 struct cpufreq_freqs *freqs, int index)
2019{
2020        int ret;
2021
2022        freqs->new = cpufreq_driver->get_intermediate(policy, index);
2023
2024        /* We don't need to switch to intermediate freq */
2025        if (!freqs->new)
2026                return 0;
2027
2028        pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2029                 __func__, policy->cpu, freqs->old, freqs->new);
2030
2031        cpufreq_freq_transition_begin(policy, freqs);
2032        ret = cpufreq_driver->target_intermediate(policy, index);
2033        cpufreq_freq_transition_end(policy, freqs, ret);
2034
2035        if (ret)
2036                pr_err("%s: Failed to change to intermediate frequency: %d\n",
2037                       __func__, ret);
2038
2039        return ret;
2040}
2041
2042static int __target_index(struct cpufreq_policy *policy, int index)
2043{
2044        struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2045        unsigned int intermediate_freq = 0;
2046        unsigned int newfreq = policy->freq_table[index].frequency;
2047        int retval = -EINVAL;
2048        bool notify;
2049
2050        if (newfreq == policy->cur)
2051                return 0;
2052
2053        notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2054        if (notify) {
2055                /* Handle switching to intermediate frequency */
2056                if (cpufreq_driver->get_intermediate) {
2057                        retval = __target_intermediate(policy, &freqs, index);
2058                        if (retval)
2059                                return retval;
2060
2061                        intermediate_freq = freqs.new;
2062                        /* Set old freq to intermediate */
2063                        if (intermediate_freq)
2064                                freqs.old = freqs.new;
2065                }
2066
2067                freqs.new = newfreq;
2068                pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2069                         __func__, policy->cpu, freqs.old, freqs.new);
2070
2071                cpufreq_freq_transition_begin(policy, &freqs);
2072        }
2073
2074        retval = cpufreq_driver->target_index(policy, index);
2075        if (retval)
2076                pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2077                       retval);
2078
2079        if (notify) {
2080                cpufreq_freq_transition_end(policy, &freqs, retval);
2081
2082                /*
2083                 * Failed after switching to the intermediate freq? The driver
2084                 * should have reverted to the initial frequency and so should
2085                 * we. Check for intermediate_freq instead of get_intermediate
2086                 * here, in case we never switched to the intermediate freq.
2087                 */
2088                if (unlikely(retval && intermediate_freq)) {
2089                        freqs.old = intermediate_freq;
2090                        freqs.new = policy->restore_freq;
2091                        cpufreq_freq_transition_begin(policy, &freqs);
2092                        cpufreq_freq_transition_end(policy, &freqs, 0);
2093                }
2094        }
2095
2096        return retval;
2097}
2098
2099int __cpufreq_driver_target(struct cpufreq_policy *policy,
2100                            unsigned int target_freq,
2101                            unsigned int relation)
2102{
2103        unsigned int old_target_freq = target_freq;
2104        int index;
2105
2106        if (cpufreq_disabled())
2107                return -ENODEV;
2108
2109        /* Make sure that target_freq is within supported range */
2110        target_freq = clamp_val(target_freq, policy->min, policy->max);
2111
2112        pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2113                 policy->cpu, target_freq, relation, old_target_freq);
2114
2115        /*
2116         * This might look like a redundant call, as we are checking it again
2117         * after finding the index. But it is left intentionally for cases
2118         * where exactly the same frequency is requested again, so that we can
2119         * save a few function calls.
2120         */
2121        if (target_freq == policy->cur)
2122                return 0;
2123
2124        /* Save last value to restore later on errors */
2125        policy->restore_freq = policy->cur;
2126
2127        if (cpufreq_driver->target)
2128                return cpufreq_driver->target(policy, target_freq, relation);
2129
2130        if (!cpufreq_driver->target_index)
2131                return -EINVAL;
2132
2133        index = cpufreq_frequency_table_target(policy, target_freq, relation);
2134
2135        return __target_index(policy, index);
2136}
2137EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2138
2139int cpufreq_driver_target(struct cpufreq_policy *policy,
2140                          unsigned int target_freq,
2141                          unsigned int relation)
2142{
2143        int ret = -EINVAL;
2144
2145        down_write(&policy->rwsem);
2146
2147        ret = __cpufreq_driver_target(policy, target_freq, relation);
2148
2149        up_write(&policy->rwsem);
2150
2151        return ret;
2152}
2153EXPORT_SYMBOL_GPL(cpufreq_driver_target);
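
/*
 * Illustrative sketch: which variant to call depends on locking context.
 * Governor callbacks run with policy->rwsem already held by the core, so
 * they must use the unlocked __cpufreq_driver_target(); other contexts use
 * cpufreq_driver_target(), which takes the lock itself. "my_gov_limits" is
 * a hypothetical ->limits() callback, modeled on the powersave governor.
 */
#if 0
static void my_gov_limits(struct cpufreq_policy *policy)
{
	/* policy->rwsem is held by the caller, use the unlocked variant */
	__cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
}
#endif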
2154
2155__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2156{
2157        return NULL;
2158}
2159
2160static int cpufreq_init_governor(struct cpufreq_policy *policy)
2161{
2162        int ret;
2163
2164        /* Don't start any governor operations if we are entering suspend */
2165        if (cpufreq_suspended)
2166                return 0;
2167        /*
2168         * The governor might not be initialized here if an ACPI _PPC change
2169         * notification happened, so check for it.
2170         */
2171        if (!policy->governor)
2172                return -EINVAL;
2173
2174        /* Platform doesn't want dynamic frequency switching? */
2175        if (policy->governor->dynamic_switching &&
2176            cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2177                struct cpufreq_governor *gov = cpufreq_fallback_governor();
2178
2179                if (gov) {
2180                        pr_warn("Can't use %s governor as dynamic switching is disallowed. Falling back to %s governor\n",
2181                                policy->governor->name, gov->name);
2182                        policy->governor = gov;
2183                } else {
2184                        return -EINVAL;
2185                }
2186        }
2187
2188        if (!try_module_get(policy->governor->owner))
2189                return -EINVAL;
2190
2191        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2192
2193        if (policy->governor->init) {
2194                ret = policy->governor->init(policy);
2195                if (ret) {
2196                        module_put(policy->governor->owner);
2197                        return ret;
2198                }
2199        }
2200
2201        return 0;
2202}
2203
2204static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2205{
2206        if (cpufreq_suspended || !policy->governor)
2207                return;
2208
2209        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2210
2211        if (policy->governor->exit)
2212                policy->governor->exit(policy);
2213
2214        module_put(policy->governor->owner);
2215}
2216
2217static int cpufreq_start_governor(struct cpufreq_policy *policy)
2218{
2219        int ret;
2220
2221        if (cpufreq_suspended)
2222                return 0;
2223
2224        if (!policy->governor)
2225                return -EINVAL;
2226
2227        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2228
2229        if (cpufreq_driver->get)
2230                cpufreq_verify_current_freq(policy, false);
2231
2232        if (policy->governor->start) {
2233                ret = policy->governor->start(policy);
2234                if (ret)
2235                        return ret;
2236        }
2237
2238        if (policy->governor->limits)
2239                policy->governor->limits(policy);
2240
2241        return 0;
2242}
2243
2244static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2245{
2246        if (cpufreq_suspended || !policy->governor)
2247                return;
2248
2249        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2250
2251        if (policy->governor->stop)
2252                policy->governor->stop(policy);
2253}
2254
2255static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2256{
2257        if (cpufreq_suspended || !policy->governor)
2258                return;
2259
2260        pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2261
2262        if (policy->governor->limits)
2263                policy->governor->limits(policy);
2264}
2265
2266int cpufreq_register_governor(struct cpufreq_governor *governor)
2267{
2268        int err;
2269
2270        if (!governor)
2271                return -EINVAL;
2272
2273        if (cpufreq_disabled())
2274                return -ENODEV;
2275
2276        mutex_lock(&cpufreq_governor_mutex);
2277
2278        err = -EBUSY;
2279        if (!find_governor(governor->name)) {
2280                err = 0;
2281                list_add(&governor->governor_list, &cpufreq_governor_list);
2282        }
2283
2284        mutex_unlock(&cpufreq_governor_mutex);
2285        return err;
2286}
2287EXPORT_SYMBOL_GPL(cpufreq_register_governor);
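
/*
 * Illustrative sketch (hypothetical module code): registering a minimal
 * governor, structured like the in-tree performance governor. It only
 * implements ->limits(), which pins the frequency to the policy maximum.
 */
#if 0
static void my_perf_limits(struct cpufreq_policy *policy)
{
	__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
}

static struct cpufreq_governor my_perf_governor = {
	.name	= "my_perf",
	.owner	= THIS_MODULE,
	.limits	= my_perf_limits,
};

static int __init my_perf_init(void)
{
	return cpufreq_register_governor(&my_perf_governor);
}
module_init(my_perf_init);
#endif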
2288
2289void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2290{
2291        struct cpufreq_policy *policy;
2292        unsigned long flags;
2293
2294        if (!governor)
2295                return;
2296
2297        if (cpufreq_disabled())
2298                return;
2299
2300        /* clear last_governor for all inactive policies */
2301        read_lock_irqsave(&cpufreq_driver_lock, flags);
2302        for_each_inactive_policy(policy) {
2303                if (!strcmp(policy->last_governor, governor->name)) {
2304                        policy->governor = NULL;
2305                        strcpy(policy->last_governor, "\0");
2306                }
2307        }
2308        read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2309
2310        mutex_lock(&cpufreq_governor_mutex);
2311        list_del(&governor->governor_list);
2312        mutex_unlock(&cpufreq_governor_mutex);
2313}
2314EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2315
2316
2317/*********************************************************************
2318 *                          POLICY INTERFACE                         *
2319 *********************************************************************/
2320
2321/**
2322 * cpufreq_get_policy - get the current cpufreq_policy
2323 * @policy: struct cpufreq_policy into which the current policy is written
2324 * @cpu: CPU to find the policy for
2325 *
2326 * Reads the current cpufreq policy.
2327 */
2328int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2329{
2330        struct cpufreq_policy *cpu_policy;
2331        if (!policy)
2332                return -EINVAL;
2333
2334        cpu_policy = cpufreq_cpu_get(cpu);
2335        if (!cpu_policy)
2336                return -EINVAL;
2337
2338        memcpy(policy, cpu_policy, sizeof(*policy));
2339
2340        cpufreq_cpu_put(cpu_policy);
2341        return 0;
2342}
2343EXPORT_SYMBOL(cpufreq_get_policy);
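
/*
 * Illustrative sketch: a caller taking a snapshot of a CPU's policy limits.
 * Note that the on-stack copy of struct cpufreq_policy is sizeable.
 * "my_dump_limits" is a hypothetical helper.
 */
#if 0
static void my_dump_limits(unsigned int cpu)
{
	struct cpufreq_policy snapshot;

	if (!cpufreq_get_policy(&snapshot, cpu))
		pr_info("CPU%u policy: %u - %u kHz\n", cpu,
			snapshot.min, snapshot.max);
}
#endif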
2344
2345/**
2346 * cpufreq_set_policy - Modify cpufreq policy parameters.
2347 * @policy: Policy object to modify.
2348 * @new_policy: New policy data.
2349 *
2350 * Pass @new_policy to the cpufreq driver's ->verify() callback, run the
2351 * installed policy notifiers for it with the CPUFREQ_ADJUST value, pass it to
2352 * the driver's ->verify() callback again and run the notifiers for it again
2353 * with the CPUFREQ_NOTIFY value.  Next, copy the min and max parameters
2354 * of @new_policy to @policy and either invoke the driver's ->setpolicy()
2355 * callback (if present) or carry out a governor update for @policy.  That is,
2356 * run the current governor's ->limits() callback (if the governor field in
2357 * @new_policy points to the same object as the one in @policy) or replace the
2358 * governor for @policy with the new one stored in @new_policy.
2359 *
2360 * The cpuinfo part of @policy is not updated by this function.
2361 */
2362int cpufreq_set_policy(struct cpufreq_policy *policy,
2363                       struct cpufreq_policy *new_policy)
2364{
2365        struct cpufreq_governor *old_gov;
2366        struct device *cpu_dev = get_cpu_device(policy->cpu);
2367        int ret;
2368
2369        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2370                 new_policy->cpu, new_policy->min, new_policy->max);
2371
2372        memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2373
2374        /*
2375         * The PM QoS framework collects all the requests from users and
2376         * provides us with the final aggregated value here.
2377         */
2378        new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
2379        new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
2380
2381        /* verify the cpu speed can be set within this limit */
2382        ret = cpufreq_driver->verify(new_policy);
2383        if (ret)
2384                return ret;
2385
2386        /*
2387         * The notifier-chain shall be removed once all the users of
2388         * CPUFREQ_ADJUST are moved to use the QoS framework.
2389         */
2390        /* adjust if necessary - all reasons */
2391        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2392                        CPUFREQ_ADJUST, new_policy);
2393
2394        /*
2395         * verify that the cpu speed can be set within this limit, which
2396         * might be different from the first one
2397         */
2398        ret = cpufreq_driver->verify(new_policy);
2399        if (ret)
2400                return ret;
2401
2402        /* notification of the new policy */
2403        blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2404                        CPUFREQ_NOTIFY, new_policy);
2405
2406        policy->min = new_policy->min;
2407        policy->max = new_policy->max;
2408        trace_cpu_frequency_limits(policy);
2409
2410        policy->cached_target_freq = UINT_MAX;
2411
2412        pr_debug("new min and max freqs are %u - %u kHz\n",
2413                 policy->min, policy->max);
2414
2415        if (cpufreq_driver->setpolicy) {
2416                policy->policy = new_policy->policy;
2417                pr_debug("setting range\n");
2418                return cpufreq_driver->setpolicy(policy);
2419        }
2420
2421        if (new_policy->governor == policy->governor) {
2422                pr_debug("governor limits update\n");
2423                cpufreq_governor_limits(policy);
2424                return 0;
2425        }
2426
2427        pr_debug("governor switch\n");
2428
2429        /* save old, working values */
2430        old_gov = policy->governor;
2431        /* end old governor */
2432        if (old_gov) {
2433                cpufreq_stop_governor(policy);
2434                cpufreq_exit_governor(policy);
2435        }
2436
2437        /* start new governor */
2438        policy->governor = new_policy->governor;
2439        ret = cpufreq_init_governor(policy);
2440        if (!ret) {
2441                ret = cpufreq_start_governor(policy);
2442                if (!ret) {
2443                        pr_debug("governor change\n");
2444                        sched_cpufreq_governor_change(policy, old_gov);
2445                        return 0;
2446                }
2447                cpufreq_exit_governor(policy);
2448        }
2449
2450        /* new governor failed, so re-start old one */
2451        pr_debug("starting governor %s failed\n", policy->governor->name);
2452        if (old_gov) {
2453                policy->governor = old_gov;
2454                if (cpufreq_init_governor(policy))
2455                        policy->governor = NULL;
2456                else
2457                        cpufreq_start_governor(policy);
2458        }
2459
2460        return ret;
2461}
2462
2463/**
2464 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2465 * @cpu: CPU to re-evaluate the policy for.
2466 *
2467 * Update the current frequency for the cpufreq policy of @cpu and use
2468 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2469 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2470 * for the policy in question, among other things.
2471 */
2472void cpufreq_update_policy(unsigned int cpu)
2473{
2474        struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2475
2476        if (!policy)
2477                return;
2478
2479        /*
2480         * BIOS might change freq behind our back
2481         * -> ask driver for current freq and notify governors about a change
2482         */
2483        if (cpufreq_driver->get && has_target() &&
2484            (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2485                goto unlock;
2486
2487        refresh_frequency_limits(policy);
2488
2489unlock:
2490        cpufreq_cpu_release(policy);
2491}
2492EXPORT_SYMBOL(cpufreq_update_policy);
2493
2494/**
2495 * cpufreq_update_limits - Update policy limits for a given CPU.
2496 * @cpu: CPU to update the policy limits for.
2497 *
2498 * Invoke the driver's ->update_limits callback if present or call
2499 * cpufreq_update_policy() for @cpu.
2500 */
2501void cpufreq_update_limits(unsigned int cpu)
2502{
2503        if (cpufreq_driver->update_limits)
2504                cpufreq_driver->update_limits(cpu);
2505        else
2506                cpufreq_update_policy(cpu);
2507}
2508EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2509
2510/*********************************************************************
2511 *               BOOST                                               *
2512 *********************************************************************/
2513static int cpufreq_boost_set_sw(int state)
2514{
2515        struct cpufreq_policy *policy;
2516        int ret = -EINVAL;
2517
2518        for_each_active_policy(policy) {
2519                if (!policy->freq_table)
2520                        continue;
2521
2522                ret = cpufreq_frequency_table_cpuinfo(policy,
2523                                                      policy->freq_table);
2524                if (ret) {
2525                        pr_err("%s: Policy frequency update failed\n",
2526                               __func__);
2527                        break;
2528                }
2529
2530                ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
2531                if (ret < 0)
2532                        break;
2533        }
2534
2535        return ret;
2536}
2537
2538int cpufreq_boost_trigger_state(int state)
2539{
2540        unsigned long flags;
2541        int ret = 0;
2542
2543        if (cpufreq_driver->boost_enabled == state)
2544                return 0;
2545
2546        write_lock_irqsave(&cpufreq_driver_lock, flags);
2547        cpufreq_driver->boost_enabled = state;
2548        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2549
2550        ret = cpufreq_driver->set_boost(state);
2551        if (ret) {
2552                write_lock_irqsave(&cpufreq_driver_lock, flags);
2553                cpufreq_driver->boost_enabled = !state;
2554                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2555
2556                pr_err("%s: Cannot %s BOOST\n",
2557                       __func__, state ? "enable" : "disable");
2558        }
2559
2560        return ret;
2561}
2562
2563static bool cpufreq_boost_supported(void)
2564{
2565        return cpufreq_driver->set_boost;
2566}
2567
2568static int create_boost_sysfs_file(void)
2569{
2570        int ret;
2571
2572        ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2573        if (ret)
2574                pr_err("%s: cannot register global BOOST sysfs file\n",
2575                       __func__);
2576
2577        return ret;
2578}
2579
2580static void remove_boost_sysfs_file(void)
2581{
2582        if (cpufreq_boost_supported())
2583                sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2584}
2585
2586int cpufreq_enable_boost_support(void)
2587{
2588        if (!cpufreq_driver)
2589                return -EINVAL;
2590
2591        if (cpufreq_boost_supported())
2592                return 0;
2593
2594        cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2595
2596        /* This will get removed on driver unregister */
2597        return create_boost_sysfs_file();
2598}
2599EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2600
2601int cpufreq_boost_enabled(void)
2602{
2603        return cpufreq_driver->boost_enabled;
2604}
2605EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2606
2607/*********************************************************************
2608 *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2609 *********************************************************************/
2610static enum cpuhp_state hp_online;
2611
2612static int cpuhp_cpufreq_online(unsigned int cpu)
2613{
2614        cpufreq_online(cpu);
2615
2616        return 0;
2617}
2618
2619static int cpuhp_cpufreq_offline(unsigned int cpu)
2620{
2621        cpufreq_offline(cpu);
2622
2623        return 0;
2624}
2625
2626/**
2627 * cpufreq_register_driver - register a CPU Frequency driver
2628 * @driver_data: A struct cpufreq_driver containing the values
2629 * submitted by the CPU Frequency driver.
2630 *
2631 * Registers a CPU Frequency driver with this core code. Returns zero on
2632 * success, or -EEXIST when another driver got here first (and is not
2633 * unregistered in the meantime).
2634 */
2636int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2637{
2638        unsigned long flags;
2639        int ret;
2640
2641        if (cpufreq_disabled())
2642                return -ENODEV;
2643
2644        if (!driver_data || !driver_data->verify || !driver_data->init ||
2645            !(driver_data->setpolicy || driver_data->target_index ||
2646                    driver_data->target) ||
2647             (driver_data->setpolicy && (driver_data->target_index ||
2648                    driver_data->target)) ||
2649             (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2650             (!driver_data->online != !driver_data->offline))
2651                return -EINVAL;
2652
2653        pr_debug("trying to register driver %s\n", driver_data->name);
2654
2655        /* Protect against concurrent CPU online/offline. */
2656        cpus_read_lock();
2657
2658        write_lock_irqsave(&cpufreq_driver_lock, flags);
2659        if (cpufreq_driver) {
2660                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2661                ret = -EEXIST;
2662                goto out;
2663        }
2664        cpufreq_driver = driver_data;
2665        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2666
2667        if (driver_data->setpolicy)
2668                driver_data->flags |= CPUFREQ_CONST_LOOPS;
2669
2670        if (cpufreq_boost_supported()) {
2671                ret = create_boost_sysfs_file();
2672                if (ret)
2673                        goto err_null_driver;
2674        }
2675
2676        ret = subsys_interface_register(&cpufreq_interface);
2677        if (ret)
2678                goto err_boost_unreg;
2679
2680        if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2681            list_empty(&cpufreq_policy_list)) {
2682                /* if all ->init() calls failed, unregister */
2683                ret = -ENODEV;
2684                pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2685                         driver_data->name);
2686                goto err_if_unreg;
2687        }
2688
2689        ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2690                                                   "cpufreq:online",
2691                                                   cpuhp_cpufreq_online,
2692                                                   cpuhp_cpufreq_offline);
2693        if (ret < 0)
2694                goto err_if_unreg;
2695        hp_online = ret;
2696        ret = 0;
2697
2698        pr_debug("driver %s up and running\n", driver_data->name);
2699        goto out;
2700
2701err_if_unreg:
2702        subsys_interface_unregister(&cpufreq_interface);
2703err_boost_unreg:
2704        remove_boost_sysfs_file();
2705err_null_driver:
2706        write_lock_irqsave(&cpufreq_driver_lock, flags);
2707        cpufreq_driver = NULL;
2708        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2709out:
2710        cpus_read_unlock();
2711        return ret;
2712}
2713EXPORT_SYMBOL_GPL(cpufreq_register_driver);
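
/*
 * Illustrative sketch (hypothetical driver code): the minimal shape of a
 * frequency-table based driver that passes the validation above, i.e. it
 * provides ->verify, ->init and exactly one of ->setpolicy, ->target or
 * ->target_index. All "my_*" names and the table values are assumptions.
 */
#if 0
static struct cpufreq_frequency_table my_freq_table[] = {
	{ .frequency = 396000 },
	{ .frequency = 792000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_init(struct cpufreq_policy *policy)
{
	policy->freq_table = my_freq_table;	/* validated by the core */
	policy->cpuinfo.transition_latency = 100 * 1000;	/* ns, assumed */
	return 0;
}

static int my_target_index(struct cpufreq_policy *policy, unsigned int index)
{
	/* program the PLL/divider for my_freq_table[index].frequency here */
	return 0;
}

static struct cpufreq_driver my_driver = {
	.name		= "my-cpufreq",
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= my_target_index,
	.init		= my_init,
};

static int __init my_driver_init(void)
{
	return cpufreq_register_driver(&my_driver);
}
module_init(my_driver_init);
#endif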
2714
2715/**
2716 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2717 *
2718 * Unregister the current CPUFreq driver. Only call this if you have
2719 * the right to do so, i.e. if you have succeeded in initialising before!
2720 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2721 * currently not initialised.
2722 */
2723int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2724{
2725        unsigned long flags;
2726
2727        if (!cpufreq_driver || (driver != cpufreq_driver))
2728                return -EINVAL;
2729
2730        pr_debug("unregistering driver %s\n", driver->name);
2731
2732        /* Protect against concurrent cpu hotplug */
2733        cpus_read_lock();
2734        subsys_interface_unregister(&cpufreq_interface);
2735        remove_boost_sysfs_file();
2736        cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2737
2738        write_lock_irqsave(&cpufreq_driver_lock, flags);
2739
2740        cpufreq_driver = NULL;
2741
2742        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2743        cpus_read_unlock();
2744
2745        return 0;
2746}
2747EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2748
2749/*
2750 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2751 * or mutexes when secondary CPUs are halted.
2752 */
2753static struct syscore_ops cpufreq_syscore_ops = {
2754        .shutdown = cpufreq_suspend,
2755};
2756
2757struct kobject *cpufreq_global_kobject;
2758EXPORT_SYMBOL(cpufreq_global_kobject);
2759
2760static int __init cpufreq_core_init(void)
2761{
2762        if (cpufreq_disabled())
2763                return -ENODEV;
2764
2765        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2766        BUG_ON(!cpufreq_global_kobject);
2767
2768        register_syscore_ops(&cpufreq_syscore_ops);
2769
2770        return 0;
2771}
2772module_param(off, int, 0444);
2773core_initcall(cpufreq_core_init);
2774