linux/drivers/devfreq/devfreq.c
   1/*
   2 * devfreq: Generic Dynamic Voltage and Frequency Scaling (DVFS) Framework
   3 *          for Non-CPU Devices.
   4 *
   5 * Copyright (C) 2011 Samsung Electronics
   6 *      MyungJoo Ham <myungjoo.ham@samsung.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 */
  12
  13#include <linux/kernel.h>
  14#include <linux/sched.h>
  15#include <linux/errno.h>
  16#include <linux/err.h>
  17#include <linux/init.h>
  18#include <linux/module.h>
  19#include <linux/slab.h>
  20#include <linux/stat.h>
  21#include <linux/opp.h>
  22#include <linux/devfreq.h>
  23#include <linux/workqueue.h>
  24#include <linux/platform_device.h>
  25#include <linux/list.h>
  26#include <linux/printk.h>
  27#include <linux/hrtimer.h>
  28#include "governor.h"
  29
  30static struct class *devfreq_class;
  31
  32/*
  33 * devfreq core provides delayed work based load monitoring helper
  34 * functions. Governors can use these or can implement their own
  35 * monitoring mechanism.
  36 */
  37static struct workqueue_struct *devfreq_wq;
  38
  39/* The list of all device-devfreq governors */
  40static LIST_HEAD(devfreq_governor_list);
  41/* The list of all devfreq devices */
  42static LIST_HEAD(devfreq_list);
  43static DEFINE_MUTEX(devfreq_list_lock);
  44
  45/**
  46 * find_device_devfreq() - find devfreq struct using device pointer
  47 * @dev:        device pointer used to lookup device devfreq.
  48 *
  49 * Search the list of device devfreqs and return the matched device's
  50 * devfreq info. devfreq_list_lock should be held by the caller.
  51 */
  52static struct devfreq *find_device_devfreq(struct device *dev)
  53{
  54        struct devfreq *tmp_devfreq;
  55
  56        if (unlikely(IS_ERR_OR_NULL(dev))) {
  57                pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
  58                return ERR_PTR(-EINVAL);
  59        }
  60        WARN(!mutex_is_locked(&devfreq_list_lock),
  61             "devfreq_list_lock must be locked.");
  62
  63        list_for_each_entry(tmp_devfreq, &devfreq_list, node) {
  64                if (tmp_devfreq->dev.parent == dev)
  65                        return tmp_devfreq;
  66        }
  67
  68        return ERR_PTR(-ENODEV);
  69}
  70
  71/**
  72 * devfreq_get_freq_level() - Lookup freq_table for the frequency
  73 * @devfreq:    the devfreq instance
  74 * @freq:       the target frequency
  75 */
  76static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
  77{
  78        int lev;
  79
  80        for (lev = 0; lev < devfreq->profile->max_state; lev++)
  81                if (freq == devfreq->profile->freq_table[lev])
  82                        return lev;
  83
  84        return -EINVAL;
  85}
  86
  87/**
  88 * devfreq_update_status() - Update statistics of devfreq behavior
  89 * @devfreq:    the devfreq instance
  90 * @freq:       the update target frequency
  91 */
  92static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
  93{
  94        int lev, prev_lev;
  95        unsigned long cur_time;
  96
  97        lev = devfreq_get_freq_level(devfreq, freq);
  98        if (lev < 0)
  99                return lev;
 100
 101        cur_time = jiffies;
 102        devfreq->time_in_state[lev] +=
 103                         cur_time - devfreq->last_stat_updated;
 104        if (freq != devfreq->previous_freq) {
 105                prev_lev = devfreq_get_freq_level(devfreq,
 106                                                devfreq->previous_freq);
 107                devfreq->trans_table[(prev_lev *
 108                                devfreq->profile->max_state) + lev]++;
 109                devfreq->total_trans++;
 110        }
 111        devfreq->last_stat_updated = cur_time;
 112
 113        return 0;
 114}
 115
 116/**
 117 * find_devfreq_governor() - find devfreq governor from name
 118 * @name:       name of the governor
 119 *
 120 * Search the list of devfreq governors and return the matched
 121 * governor's pointer. devfreq_list_lock should be held by the caller.
 122 */
 123static struct devfreq_governor *find_devfreq_governor(const char *name)
 124{
 125        struct devfreq_governor *tmp_governor;
 126
 127        if (unlikely(IS_ERR_OR_NULL(name))) {
 128                pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
 129                return ERR_PTR(-EINVAL);
 130        }
 131        WARN(!mutex_is_locked(&devfreq_list_lock),
 132             "devfreq_list_lock must be locked.");
 133
 134        list_for_each_entry(tmp_governor, &devfreq_governor_list, node) {
 135                if (!strncmp(tmp_governor->name, name, DEVFREQ_NAME_LEN))
 136                        return tmp_governor;
 137        }
 138
 139        return ERR_PTR(-ENODEV);
 140}
 141
 142/* Load monitoring helper functions for governors to use */
 143
 144/**
 145 * update_devfreq() - Reevaluate the device and configure frequency.
 146 * @devfreq:    the devfreq instance.
 147 *
 148 * Note: Lock devfreq->lock before calling update_devfreq
 149 *       This function is exported for governors.
 150 */
 151int update_devfreq(struct devfreq *devfreq)
 152{
 153        unsigned long freq;
 154        int err = 0;
 155        u32 flags = 0;
 156
 157        if (!mutex_is_locked(&devfreq->lock)) {
 158                WARN(true, "devfreq->lock must be locked by the caller.\n");
 159                return -EINVAL;
 160        }
 161
 162        if (!devfreq->governor)
 163                return -EINVAL;
 164
 165        /* Reevaluate the proper frequency */
 166        err = devfreq->governor->get_target_freq(devfreq, &freq);
 167        if (err)
 168                return err;
 169
 170        /*
 171         * Adjust the frequency with user freq and QoS.
 172         *
 173         * List from the highest priority:
 174         * max_freq (probably called by thermal when it's too hot)
 175         * min_freq
 176         */
 177
 178        if (devfreq->min_freq && freq < devfreq->min_freq) {
 179                freq = devfreq->min_freq;
 180                flags &= ~DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use GLB */
 181        }
 182        if (devfreq->max_freq && freq > devfreq->max_freq) {
 183                freq = devfreq->max_freq;
 184                flags |= DEVFREQ_FLAG_LEAST_UPPER_BOUND; /* Use LUB */
 185        }
 186
 187        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
 188        if (err)
 189                return err;
 190
 191        if (devfreq->profile->freq_table)
 192                if (devfreq_update_status(devfreq, freq))
 193                        dev_err(&devfreq->dev,
 194                                "Couldn't update frequency transition information.\n");
 195
 196        devfreq->previous_freq = freq;
 197        return err;
 198}
 199EXPORT_SYMBOL(update_devfreq);
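
    /*
     * Example (illustrative sketch, not part of this file's logic): a governor
     * or notifier that wants to force a re-evaluation outside of the periodic
     * monitor must take devfreq->lock around update_devfreq(), as required
     * above. The helper name below is hypothetical.
     *
     *        static int example_force_update(struct devfreq *devfreq)
     *        {
     *                int err;
     *
     *                mutex_lock(&devfreq->lock);
     *                err = update_devfreq(devfreq);
     *                mutex_unlock(&devfreq->lock);
     *
     *                return err;
     *        }
     */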
 200
 201/**
 202 * devfreq_monitor() - Periodically poll devfreq objects.
 203 * @work:       the work struct used to run devfreq_monitor periodically.
 204 *
 205 */
 206static void devfreq_monitor(struct work_struct *work)
 207{
 208        int err;
 209        struct devfreq *devfreq = container_of(work,
 210                                        struct devfreq, work.work);
 211
 212        mutex_lock(&devfreq->lock);
 213        err = update_devfreq(devfreq);
 214        if (err)
 215                dev_err(&devfreq->dev, "dvfs failed with (%d) error\n", err);
 216
 217        queue_delayed_work(devfreq_wq, &devfreq->work,
 218                                msecs_to_jiffies(devfreq->profile->polling_ms));
 219        mutex_unlock(&devfreq->lock);
 220}
 221
 222/**
 223 * devfreq_monitor_start() - Start load monitoring of devfreq instance
 224 * @devfreq:    the devfreq instance.
 225 *
 226 * Helper function for starting devfreq device load monitoring. By
 227 * default, delayed-work-based monitoring is supported. To be called
 228 * from the governor in response to the DEVFREQ_GOV_START event when
 229 * the device is added to the devfreq framework.
 230 */
 231void devfreq_monitor_start(struct devfreq *devfreq)
 232{
 233        INIT_DEFERRABLE_WORK(&devfreq->work, devfreq_monitor);
 234        if (devfreq->profile->polling_ms)
 235                queue_delayed_work(devfreq_wq, &devfreq->work,
 236                        msecs_to_jiffies(devfreq->profile->polling_ms));
 237}
 238EXPORT_SYMBOL(devfreq_monitor_start);
 239
 240/**
 241 * devfreq_monitor_stop() - Stop load monitoring of a devfreq instance
 242 * @devfreq:    the devfreq instance.
 243 *
 244 * Helper function to stop devfreq device load monitoring. To be
 245 * called from the governor in response to the DEVFREQ_GOV_STOP
 246 * event when the device is removed from the devfreq framework.
 247 */
 248void devfreq_monitor_stop(struct devfreq *devfreq)
 249{
 250        cancel_delayed_work_sync(&devfreq->work);
 251}
 252EXPORT_SYMBOL(devfreq_monitor_stop);
 253
 254/**
 255 * devfreq_monitor_suspend() - Suspend load monitoring of a devfreq instance
 256 * @devfreq:    the devfreq instance.
 257 *
 258 * Helper function to suspend devfreq device load monitoring. To be
 259 * called from the governor in response to the DEVFREQ_GOV_SUSPEND
 260 * event or when the polling interval is set to zero.
 261 *
 262 * Note: Though this function is similar to devfreq_monitor_stop(),
 263 * it is intentionally kept separate to provide hooks for collecting
 264 * transition statistics.
 265 */
 266void devfreq_monitor_suspend(struct devfreq *devfreq)
 267{
 268        mutex_lock(&devfreq->lock);
 269        if (devfreq->stop_polling) {
 270                mutex_unlock(&devfreq->lock);
 271                return;
 272        }
 273
 274        devfreq_update_status(devfreq, devfreq->previous_freq);
 275        devfreq->stop_polling = true;
 276        mutex_unlock(&devfreq->lock);
 277        cancel_delayed_work_sync(&devfreq->work);
 278}
 279EXPORT_SYMBOL(devfreq_monitor_suspend);
 280
 281/**
 282 * devfreq_monitor_resume() - Resume load monitoring of a devfreq instance
 283 * @devfreq:    the devfreq instance.
 284 *
 285 * Helper function to resume devfreq device load monitoring. To be
 286 * called from the governor in response to the DEVFREQ_GOV_RESUME
 287 * event or when the polling interval is set to non-zero.
 288 */
 289void devfreq_monitor_resume(struct devfreq *devfreq)
 290{
 291        unsigned long freq;
 292
 293        mutex_lock(&devfreq->lock);
 294        if (!devfreq->stop_polling)
 295                goto out;
 296
 297        if (!delayed_work_pending(&devfreq->work) &&
 298                        devfreq->profile->polling_ms)
 299                queue_delayed_work(devfreq_wq, &devfreq->work,
 300                        msecs_to_jiffies(devfreq->profile->polling_ms));
 301
 302        devfreq->last_stat_updated = jiffies;
 303        devfreq->stop_polling = false;
 304
 305        if (devfreq->profile->get_cur_freq &&
 306                !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
 307                devfreq->previous_freq = freq;
 308
 309out:
 310        mutex_unlock(&devfreq->lock);
 311}
 312EXPORT_SYMBOL(devfreq_monitor_resume);
 313
 314/**
 315 * devfreq_interval_update() - Update device devfreq monitoring interval
 316 * @devfreq:    the devfreq instance.
 317 * @delay:      new polling interval to be set.
 318 *
 319 * Helper function to set a new load monitoring polling interval. To be
 320 * called from the governor in response to the DEVFREQ_GOV_INTERVAL event.
 321 */
 322void devfreq_interval_update(struct devfreq *devfreq, unsigned int *delay)
 323{
 324        unsigned int cur_delay = devfreq->profile->polling_ms;
 325        unsigned int new_delay = *delay;
 326
 327        mutex_lock(&devfreq->lock);
 328        devfreq->profile->polling_ms = new_delay;
 329
 330        if (devfreq->stop_polling)
 331                goto out;
 332
 333        /* if new delay is zero, stop polling */
 334        if (!new_delay) {
 335                mutex_unlock(&devfreq->lock);
 336                cancel_delayed_work_sync(&devfreq->work);
 337                return;
 338        }
 339
 340        /* if current delay is zero, start polling with new delay */
 341        if (!cur_delay) {
 342                queue_delayed_work(devfreq_wq, &devfreq->work,
 343                        msecs_to_jiffies(devfreq->profile->polling_ms));
 344                goto out;
 345        }
 346
 347        /* if current delay is greater than new delay, restart polling */
 348        if (cur_delay > new_delay) {
 349                mutex_unlock(&devfreq->lock);
 350                cancel_delayed_work_sync(&devfreq->work);
 351                mutex_lock(&devfreq->lock);
 352                if (!devfreq->stop_polling)
 353                        queue_delayed_work(devfreq_wq, &devfreq->work,
 354                              msecs_to_jiffies(devfreq->profile->polling_ms));
 355        }
 356out:
 357        mutex_unlock(&devfreq->lock);
 358}
 359EXPORT_SYMBOL(devfreq_interval_update);
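
    /*
     * Example (illustrative sketch): a governor typically maps the standard
     * governor events onto the monitoring helpers above from its
     * event_handler callback. "example_" names are hypothetical.
     *
     *        static int example_event_handler(struct devfreq *devfreq,
     *                                         unsigned int event, void *data)
     *        {
     *                switch (event) {
     *                case DEVFREQ_GOV_START:
     *                        devfreq_monitor_start(devfreq);
     *                        break;
     *                case DEVFREQ_GOV_STOP:
     *                        devfreq_monitor_stop(devfreq);
     *                        break;
     *                case DEVFREQ_GOV_INTERVAL:
     *                        devfreq_interval_update(devfreq,
     *                                                (unsigned int *)data);
     *                        break;
     *                case DEVFREQ_GOV_SUSPEND:
     *                        devfreq_monitor_suspend(devfreq);
     *                        break;
     *                case DEVFREQ_GOV_RESUME:
     *                        devfreq_monitor_resume(devfreq);
     *                        break;
     *                }
     *
     *                return 0;
     *        }
     */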
 360
 361/**
 362 * devfreq_notifier_call() - Notify that the device frequency requirements
 363 *                         have been changed outside of the devfreq framework.
 364 * @nb:         the notifier_block (supposed to be devfreq->nb)
 365 * @type:       not used
 366 * @devp:       not used
 367 *
 368 * Called by a notifier that uses devfreq->nb.
 369 */
 370static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
 371                                 void *devp)
 372{
 373        struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
 374        int ret;
 375
 376        mutex_lock(&devfreq->lock);
 377        ret = update_devfreq(devfreq);
 378        mutex_unlock(&devfreq->lock);
 379
 380        return ret;
 381}
 382
 383/**
 384 * _remove_devfreq() - Remove devfreq from the list and release its resources.
 385 * @devfreq:    the devfreq struct
 386 * @skip:       skip calling device_unregister().
 387 */
 388static void _remove_devfreq(struct devfreq *devfreq, bool skip)
 389{
 390        mutex_lock(&devfreq_list_lock);
 391        if (IS_ERR(find_device_devfreq(devfreq->dev.parent))) {
 392                mutex_unlock(&devfreq_list_lock);
 393                dev_warn(&devfreq->dev, "releasing devfreq which doesn't exist\n");
 394                return;
 395        }
 396        list_del(&devfreq->node);
 397        mutex_unlock(&devfreq_list_lock);
 398
 399        if (devfreq->governor)
 400                devfreq->governor->event_handler(devfreq,
 401                                                 DEVFREQ_GOV_STOP, NULL);
 402
 403        if (devfreq->profile->exit)
 404                devfreq->profile->exit(devfreq->dev.parent);
 405
 406        if (!skip && get_device(&devfreq->dev)) {
 407                device_unregister(&devfreq->dev);
 408                put_device(&devfreq->dev);
 409        }
 410
 411        mutex_destroy(&devfreq->lock);
 412        kfree(devfreq);
 413}
 414
 415/**
 416 * devfreq_dev_release() - Callback for struct device to release the device.
 417 * @dev:        the devfreq device
 418 *
 419 * This calls _remove_devfreq() if it has not been called already.
 420 * Note that devfreq_dev_release() could be called by _remove_devfreq() as
 421 * well as by others unregistering the device.
 422 */
 423static void devfreq_dev_release(struct device *dev)
 424{
 425        struct devfreq *devfreq = to_devfreq(dev);
 426
 427        _remove_devfreq(devfreq, true);
 428}
 429
 430/**
 431 * devfreq_add_device() - Add devfreq feature to the device
 432 * @dev:        the device to add the devfreq feature to.
 433 * @profile:    device-specific profile to run devfreq.
 434 * @governor_name:      name of the governor used to select the frequency.
 435 * @data:       private data for the governor. The devfreq framework does not
 436 *              touch this value.
 437 */
 438struct devfreq *devfreq_add_device(struct device *dev,
 439                                   struct devfreq_dev_profile *profile,
 440                                   const char *governor_name,
 441                                   void *data)
 442{
 443        struct devfreq *devfreq;
 444        struct devfreq_governor *governor;
 445        int err = 0;
 446
 447        if (!dev || !profile || !governor_name) {
 448                dev_err(dev, "%s: Invalid parameters.\n", __func__);
 449                return ERR_PTR(-EINVAL);
 450        }
 451
 452        mutex_lock(&devfreq_list_lock);
 453        devfreq = find_device_devfreq(dev);
 454        mutex_unlock(&devfreq_list_lock);
 455        if (!IS_ERR(devfreq)) {
 456                dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
 457                err = -EINVAL;
 458                goto err_out;
 459        }
 460
 461        devfreq = kzalloc(sizeof(struct devfreq), GFP_KERNEL);
 462        if (!devfreq) {
 463                dev_err(dev, "%s: Unable to create devfreq for the device\n",
 464                        __func__);
 465                err = -ENOMEM;
 466                goto err_out;
 467        }
 468
 469        mutex_init(&devfreq->lock);
 470        mutex_lock(&devfreq->lock);
 471        devfreq->dev.parent = dev;
 472        devfreq->dev.class = devfreq_class;
 473        devfreq->dev.release = devfreq_dev_release;
 474        devfreq->profile = profile;
 475        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
 476        devfreq->previous_freq = profile->initial_freq;
 477        devfreq->data = data;
 478        devfreq->nb.notifier_call = devfreq_notifier_call;
 479
 480        devfreq->trans_table =  devm_kzalloc(dev, sizeof(unsigned int) *
 481                                                devfreq->profile->max_state *
 482                                                devfreq->profile->max_state,
 483                                                GFP_KERNEL);
 484        devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
 485                                                devfreq->profile->max_state,
 486                                                GFP_KERNEL);
 487        devfreq->last_stat_updated = jiffies;
 488
 489        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
 490        err = device_register(&devfreq->dev);
 491        if (err) {
 492                put_device(&devfreq->dev);
 493                mutex_unlock(&devfreq->lock);
 494                goto err_dev;
 495        }
 496
 497        mutex_unlock(&devfreq->lock);
 498
 499        mutex_lock(&devfreq_list_lock);
 500        list_add(&devfreq->node, &devfreq_list);
 501
 502        governor = find_devfreq_governor(devfreq->governor_name);
 503        if (!IS_ERR(governor))
 504                devfreq->governor = governor;
 505        if (devfreq->governor)
 506                err = devfreq->governor->event_handler(devfreq,
 507                                        DEVFREQ_GOV_START, NULL);
 508        mutex_unlock(&devfreq_list_lock);
 509        if (err) {
 510                dev_err(dev, "%s: Unable to start governor for the device\n",
 511                        __func__);
 512                goto err_init;
 513        }
 514
 515        return devfreq;
 516
 517err_init:
 518        list_del(&devfreq->node);
 519        device_unregister(&devfreq->dev);
 520err_dev:
 521        kfree(devfreq);
 522err_out:
 523        return ERR_PTR(err);
 524}
 525EXPORT_SYMBOL(devfreq_add_device);
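
    /*
     * Example (illustrative sketch): a devfreq user driver fills a
     * struct devfreq_dev_profile and registers it from probe(). The "foo_"
     * callbacks, the frequencies and the "simple_ondemand" governor choice
     * are assumptions for illustration only.
     *
     *        static struct devfreq_dev_profile foo_profile = {
     *                .initial_freq   = 200000000,
     *                .polling_ms     = 100,
     *                .target         = foo_target,
     *                .get_dev_status = foo_get_dev_status,
     *        };
     *
     *        static int foo_probe(struct platform_device *pdev)
     *        {
     *                struct devfreq *df;
     *
     *                df = devfreq_add_device(&pdev->dev, &foo_profile,
     *                                        "simple_ondemand", NULL);
     *                if (IS_ERR(df))
     *                        return PTR_ERR(df);
     *
     *                platform_set_drvdata(pdev, df);
     *                return 0;
     *        }
     */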
 526
 527/**
 528 * devfreq_remove_device() - Remove devfreq feature from a device.
 529 * @devfreq:    the devfreq instance to be removed
 530 *
 531 * The opposite of devfreq_add_device().
 532 */
 533int devfreq_remove_device(struct devfreq *devfreq)
 534{
 535        if (!devfreq)
 536                return -EINVAL;
 537
 538        _remove_devfreq(devfreq, false);
 539
 540        return 0;
 541}
 542EXPORT_SYMBOL(devfreq_remove_device);
 543
 544/**
 545 * devfreq_suspend_device() - Suspend devfreq of a device.
 546 * @devfreq: the devfreq instance to be suspended
 547 *
 548 * This function is intended to be called by the pm callbacks
 549 * (e.g., runtime_suspend, suspend) of the device driver that
 550 * holds the devfreq.
 551 */
 552int devfreq_suspend_device(struct devfreq *devfreq)
 553{
 554        if (!devfreq)
 555                return -EINVAL;
 556
 557        if (!devfreq->governor)
 558                return 0;
 559
 560        return devfreq->governor->event_handler(devfreq,
 561                                DEVFREQ_GOV_SUSPEND, NULL);
 562}
 563EXPORT_SYMBOL(devfreq_suspend_device);
 564
 565/**
 566 * devfreq_resume_device() - Resume devfreq of a device.
 567 * @devfreq: the devfreq instance to be resumed
 568 *
 569 * This function is intended to be called by the pm callbacks
 570 * (e.g., runtime_resume, resume) of the device driver that
 571 * holds the devfreq.
 572 */
 573int devfreq_resume_device(struct devfreq *devfreq)
 574{
 575        if (!devfreq)
 576                return -EINVAL;
 577
 578        if (!devfreq->governor)
 579                return 0;
 580
 581        return devfreq->governor->event_handler(devfreq,
 582                                DEVFREQ_GOV_RESUME, NULL);
 583}
 584EXPORT_SYMBOL(devfreq_resume_device);
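
    /*
     * Example (illustrative sketch): the two helpers above are meant to be
     * wired into the driver's own PM callbacks. "foo_" names are hypothetical
     * and the devfreq pointer is assumed to be stored as driver data.
     *
     *        static int foo_runtime_suspend(struct device *dev)
     *        {
     *                struct devfreq *df = dev_get_drvdata(dev);
     *
     *                return devfreq_suspend_device(df);
     *        }
     *
     *        static int foo_runtime_resume(struct device *dev)
     *        {
     *                struct devfreq *df = dev_get_drvdata(dev);
     *
     *                return devfreq_resume_device(df);
     *        }
     */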
 585
 586/**
 587 * devfreq_add_governor() - Add devfreq governor
 588 * @governor:   the devfreq governor to be added
 589 */
 590int devfreq_add_governor(struct devfreq_governor *governor)
 591{
 592        struct devfreq_governor *g;
 593        struct devfreq *devfreq;
 594        int err = 0;
 595
 596        if (!governor) {
 597                pr_err("%s: Invalid parameters.\n", __func__);
 598                return -EINVAL;
 599        }
 600
 601        mutex_lock(&devfreq_list_lock);
 602        g = find_devfreq_governor(governor->name);
 603        if (!IS_ERR(g)) {
 604                pr_err("%s: governor %s already registered\n", __func__,
 605                       g->name);
 606                err = -EINVAL;
 607                goto err_out;
 608        }
 609
 610        list_add(&governor->node, &devfreq_governor_list);
 611
 612        list_for_each_entry(devfreq, &devfreq_list, node) {
 613                int ret = 0;
 614                struct device *dev = devfreq->dev.parent;
 615
 616                if (!strncmp(devfreq->governor_name, governor->name,
 617                             DEVFREQ_NAME_LEN)) {
 618                        /* The following should never occur */
 619                        if (devfreq->governor) {
 620                                dev_warn(dev,
 621                                         "%s: Governor %s already present\n",
 622                                         __func__, devfreq->governor->name);
 623                                ret = devfreq->governor->event_handler(devfreq,
 624                                                        DEVFREQ_GOV_STOP, NULL);
 625                                if (ret) {
 626                                        dev_warn(dev,
 627                                                 "%s: Governor %s stop = %d\n",
 628                                                 __func__,
 629                                                 devfreq->governor->name, ret);
 630                                }
 631                                /* Fall through */
 632                        }
 633                        devfreq->governor = governor;
 634                        ret = devfreq->governor->event_handler(devfreq,
 635                                                DEVFREQ_GOV_START, NULL);
 636                        if (ret) {
 637                                dev_warn(dev, "%s: Governor %s start=%d\n",
 638                                         __func__, devfreq->governor->name,
 639                                         ret);
 640                        }
 641                }
 642        }
 643
 644err_out:
 645        mutex_unlock(&devfreq_list_lock);
 646
 647        return err;
 648}
 649EXPORT_SYMBOL(devfreq_add_governor);
 650
 651/**
 652 * devfreq_remove_governor() - Remove devfreq governor.
 653 * @governor:   the devfreq governor to be removed
 654 */
 655int devfreq_remove_governor(struct devfreq_governor *governor)
 656{
 657        struct devfreq_governor *g;
 658        struct devfreq *devfreq;
 659        int err = 0;
 660
 661        if (!governor) {
 662                pr_err("%s: Invalid parameters.\n", __func__);
 663                return -EINVAL;
 664        }
 665
 666        mutex_lock(&devfreq_list_lock);
 667        g = find_devfreq_governor(governor->name);
 668        if (IS_ERR(g)) {
 669                pr_err("%s: governor %s not registered\n", __func__,
 670                       governor->name);
 671                err = PTR_ERR(g);
 672                goto err_out;
 673        }
 674        list_for_each_entry(devfreq, &devfreq_list, node) {
 675                int ret;
 676                struct device *dev = devfreq->dev.parent;
 677
 678                if (!strncmp(devfreq->governor_name, governor->name,
 679                             DEVFREQ_NAME_LEN)) {
 680                        /* we should have a devfreq governor! */
 681                        if (!devfreq->governor) {
 682                                dev_warn(dev, "%s: Governor %s NOT present\n",
 683                                         __func__, governor->name);
 684                                continue;
 685                                /* Fall through */
 686                        }
 687                        ret = devfreq->governor->event_handler(devfreq,
 688                                                DEVFREQ_GOV_STOP, NULL);
 689                        if (ret) {
 690                                dev_warn(dev, "%s: Governor %s stop=%d\n",
 691                                         __func__, devfreq->governor->name,
 692                                         ret);
 693                        }
 694                        devfreq->governor = NULL;
 695                }
 696        }
 697
 698        list_del(&governor->node);
 699err_out:
 700        mutex_unlock(&devfreq_list_lock);
 701
 702        return err;
 703}
 704EXPORT_SYMBOL(devfreq_remove_governor);
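
    /*
     * Example (illustrative sketch): a governor module registers itself with
     * devfreq_add_governor() at init time and removes itself on exit. The
     * get_target_freq/event_handler callbacks are hypothetical; see the
     * event_handler sketch after devfreq_interval_update() above.
     *
     *        static struct devfreq_governor example_governor = {
     *                .name            = "example",
     *                .get_target_freq = example_get_target_freq,
     *                .event_handler   = example_event_handler,
     *        };
     *
     *        static int __init example_governor_init(void)
     *        {
     *                return devfreq_add_governor(&example_governor);
     *        }
     *        module_init(example_governor_init);
     *
     *        static void __exit example_governor_exit(void)
     *        {
     *                int ret = devfreq_remove_governor(&example_governor);
     *
     *                if (ret)
     *                        pr_err("%s: failed to remove governor (%d)\n",
     *                               __func__, ret);
     *        }
     *        module_exit(example_governor_exit);
     */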
 705
 706static ssize_t show_governor(struct device *dev,
 707                             struct device_attribute *attr, char *buf)
 708{
 709        if (!to_devfreq(dev)->governor)
 710                return -EINVAL;
 711
 712        return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
 713}
 714
 715static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
 716                              const char *buf, size_t count)
 717{
 718        struct devfreq *df = to_devfreq(dev);
 719        int ret;
 720        char str_governor[DEVFREQ_NAME_LEN + 1];
 721        struct devfreq_governor *governor;
 722
 723        ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
 724        if (ret != 1)
 725                return -EINVAL;
 726
 727        mutex_lock(&devfreq_list_lock);
 728        governor = find_devfreq_governor(str_governor);
 729        if (IS_ERR(governor)) {
 730                ret = PTR_ERR(governor);
 731                goto out;
 732        }
 733        if (df->governor == governor)
 734                goto out;
 735
 736        if (df->governor) {
 737                ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
 738                if (ret) {
 739                        dev_warn(dev, "%s: Governor %s not stopped(%d)\n",
 740                                 __func__, df->governor->name, ret);
 741                        goto out;
 742                }
 743        }
 744        df->governor = governor;
 745        strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
 746        ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
 747        if (ret)
 748                dev_warn(dev, "%s: Governor %s not started(%d)\n",
 749                         __func__, df->governor->name, ret);
 750out:
 751        mutex_unlock(&devfreq_list_lock);
 752
 753        if (!ret)
 754                ret = count;
 755        return ret;
 756}
 757static ssize_t show_available_governors(struct device *d,
 758                                    struct device_attribute *attr,
 759                                    char *buf)
 760{
 761        struct devfreq_governor *tmp_governor;
 762        ssize_t count = 0;
 763
 764        mutex_lock(&devfreq_list_lock);
 765        list_for_each_entry(tmp_governor, &devfreq_governor_list, node)
 766                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 767                                   "%s ", tmp_governor->name);
 768        mutex_unlock(&devfreq_list_lock);
 769
 770        /* Truncate the trailing space */
 771        if (count)
 772                count--;
 773
 774        count += sprintf(&buf[count], "\n");
 775
 776        return count;
 777}
 778
 779static ssize_t show_freq(struct device *dev,
 780                         struct device_attribute *attr, char *buf)
 781{
 782        unsigned long freq;
 783        struct devfreq *devfreq = to_devfreq(dev);
 784
 785        if (devfreq->profile->get_cur_freq &&
 786                !devfreq->profile->get_cur_freq(devfreq->dev.parent, &freq))
 787                        return sprintf(buf, "%lu\n", freq);
 788
 789        return sprintf(buf, "%lu\n", devfreq->previous_freq);
 790}
 791
 792static ssize_t show_target_freq(struct device *dev,
 793                        struct device_attribute *attr, char *buf)
 794{
 795        return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
 796}
 797
 798static ssize_t show_polling_interval(struct device *dev,
 799                                     struct device_attribute *attr, char *buf)
 800{
 801        return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
 802}
 803
 804static ssize_t store_polling_interval(struct device *dev,
 805                                      struct device_attribute *attr,
 806                                      const char *buf, size_t count)
 807{
 808        struct devfreq *df = to_devfreq(dev);
 809        unsigned int value;
 810        int ret;
 811
 812        if (!df->governor)
 813                return -EINVAL;
 814
 815        ret = sscanf(buf, "%u", &value);
 816        if (ret != 1)
 817                return -EINVAL;
 818
 819        df->governor->event_handler(df, DEVFREQ_GOV_INTERVAL, &value);
 820        ret = count;
 821
 822        return ret;
 823}
 824
 825static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
 826                              const char *buf, size_t count)
 827{
 828        struct devfreq *df = to_devfreq(dev);
 829        unsigned long value;
 830        int ret;
 831        unsigned long max;
 832
 833        ret = sscanf(buf, "%lu", &value);
 834        if (ret != 1)
 835                return -EINVAL;
 836
 837        mutex_lock(&df->lock);
 838        max = df->max_freq;
 839        if (value && max && value > max) {
 840                ret = -EINVAL;
 841                goto unlock;
 842        }
 843
 844        df->min_freq = value;
 845        update_devfreq(df);
 846        ret = count;
 847unlock:
 848        mutex_unlock(&df->lock);
 849        return ret;
 850}
 851
 852static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
 853                             char *buf)
 854{
 855        return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
 856}
 857
 858static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
 859                              const char *buf, size_t count)
 860{
 861        struct devfreq *df = to_devfreq(dev);
 862        unsigned long value;
 863        int ret;
 864        unsigned long min;
 865
 866        ret = sscanf(buf, "%lu", &value);
 867        if (ret != 1)
 868                return -EINVAL;
 869
 870        mutex_lock(&df->lock);
 871        min = df->min_freq;
 872        if (value && min && value < min) {
 873                ret = -EINVAL;
 874                goto unlock;
 875        }
 876
 877        df->max_freq = value;
 878        update_devfreq(df);
 879        ret = count;
 880unlock:
 881        mutex_unlock(&df->lock);
 882        return ret;
 883}
 884
 885static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
 886                             char *buf)
 887{
 888        return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
 889}
 890
 891static ssize_t show_available_freqs(struct device *d,
 892                                    struct device_attribute *attr,
 893                                    char *buf)
 894{
 895        struct devfreq *df = to_devfreq(d);
 896        struct device *dev = df->dev.parent;
 897        struct opp *opp;
 898        ssize_t count = 0;
 899        unsigned long freq = 0;
 900
 901        rcu_read_lock();
 902        do {
 903                opp = opp_find_freq_ceil(dev, &freq);
 904                if (IS_ERR(opp))
 905                        break;
 906
 907                count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
 908                                   "%lu ", freq);
 909                freq++;
 910        } while (1);
 911        rcu_read_unlock();
 912
 913        /* Truncate the trailing space */
 914        if (count)
 915                count--;
 916
 917        count += sprintf(&buf[count], "\n");
 918
 919        return count;
 920}
 921
 922static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
 923                                char *buf)
 924{
 925        struct devfreq *devfreq = to_devfreq(dev);
 926        ssize_t len;
 927        int i, j;
 928        unsigned int max_state = devfreq->profile->max_state;
 929
 930        if (!devfreq->stop_polling &&
 931                        devfreq_update_status(devfreq, devfreq->previous_freq))
 932                return 0;
 933
 934        len = sprintf(buf, "   From  :   To\n");
 935        len += sprintf(buf + len, "         :");
 936        for (i = 0; i < max_state; i++)
 937                len += sprintf(buf + len, "%8u",
 938                                devfreq->profile->freq_table[i]);
 939
 940        len += sprintf(buf + len, "   time(ms)\n");
 941
 942        for (i = 0; i < max_state; i++) {
 943                if (devfreq->profile->freq_table[i]
 944                                        == devfreq->previous_freq) {
 945                        len += sprintf(buf + len, "*");
 946                } else {
 947                        len += sprintf(buf + len, " ");
 948                }
 949                len += sprintf(buf + len, "%8u:",
 950                                devfreq->profile->freq_table[i]);
 951                for (j = 0; j < max_state; j++)
 952                        len += sprintf(buf + len, "%8u",
 953                                devfreq->trans_table[(i * max_state) + j]);
 954                len += sprintf(buf + len, "%10u\n",
 955                        jiffies_to_msecs(devfreq->time_in_state[i]));
 956        }
 957
 958        len += sprintf(buf + len, "Total transition : %u\n",
 959                                        devfreq->total_trans);
 960        return len;
 961}
 962
 963static struct device_attribute devfreq_attrs[] = {
 964        __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
 965        __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
 966        __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
 967        __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
 968        __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
 969        __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
 970               store_polling_interval),
 971        __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
 972        __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
 973        __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
 974        { },
 975};
 976
 977static int __init devfreq_init(void)
 978{
 979        devfreq_class = class_create(THIS_MODULE, "devfreq");
 980        if (IS_ERR(devfreq_class)) {
 981                pr_err("%s: couldn't create class\n", __FILE__);
 982                return PTR_ERR(devfreq_class);
 983        }
 984
 985        devfreq_wq = create_freezable_workqueue("devfreq_wq");
 986        if (!devfreq_wq) {
 987                class_destroy(devfreq_class);
 988                pr_err("%s: couldn't create workqueue\n", __FILE__);
 989                return -ENOMEM;
 990        }
 991        devfreq_class->dev_attrs = devfreq_attrs;
 992
 993        return 0;
 994}
 995subsys_initcall(devfreq_init);
 996
 997static void __exit devfreq_exit(void)
 998{
 999        class_destroy(devfreq_class);
1000        destroy_workqueue(devfreq_wq);
1001}
1002module_exit(devfreq_exit);
1003
1004/*
1005 * The following are helper functions for devfreq user device drivers using
1006 * the OPP framework.
1007 */
1008
1009/**
1010 * devfreq_recommended_opp() - Helper function to get the proper OPP for the
1011 *                           freq value given to the target callback.
1012 * @dev:        The devfreq user device. (parent of devfreq)
1013 * @freq:       The frequency given to target function
1014 * @flags:      Flags handed from devfreq framework.
1015 *
1016 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
1017 * protected pointer. The reason for the same is that the opp pointer which is
1018 * returned will remain valid for use with opp_get_{voltage, freq} only while
1019 * under the locked area. The pointer returned must be used prior to unlocking
1020 * with rcu_read_unlock() to maintain the integrity of the pointer.
1021 */
1022struct opp *devfreq_recommended_opp(struct device *dev, unsigned long *freq,
1023                                    u32 flags)
1024{
1025        struct opp *opp;
1026
1027        if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND) {
1028                /* The freq is an upper bound. opp should be lower */
1029                opp = opp_find_freq_floor(dev, freq);
1030
1031                /* If not available, use the closest opp */
1032                if (opp == ERR_PTR(-ERANGE))
1033                        opp = opp_find_freq_ceil(dev, freq);
1034        } else {
1035                /* The freq is a lower bound. opp should be higher */
1036                opp = opp_find_freq_ceil(dev, freq);
1037
1038                /* If not available, use the closest opp */
1039                if (opp == ERR_PTR(-ERANGE))
1040                        opp = opp_find_freq_floor(dev, freq);
1041        }
1042
1043        return opp;
1044}
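
    /*
     * Example (illustrative sketch): a driver's target() callback usually
     * calls devfreq_recommended_opp() under rcu_read_lock() and reads the
     * chosen frequency/voltage before unlocking, as described above.
     * "foo_set_rate" is a hypothetical device-specific helper.
     *
     *        static int foo_target(struct device *dev, unsigned long *freq,
     *                              u32 flags)
     *        {
     *                struct opp *opp;
     *                unsigned long rate, volt;
     *
     *                rcu_read_lock();
     *                opp = devfreq_recommended_opp(dev, freq, flags);
     *                if (IS_ERR(opp)) {
     *                        rcu_read_unlock();
     *                        return PTR_ERR(opp);
     *                }
     *                rate = opp_get_freq(opp);
     *                volt = opp_get_voltage(opp);
     *                rcu_read_unlock();
     *
     *                return foo_set_rate(dev, rate, volt);
     *        }
     */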
1045
1046/**
1047 * devfreq_register_opp_notifier() - Helper function to get devfreq notified
1048 *                                 of any changes in the availability of the
1049 *                                 device's OPPs
1050 * @dev:        The devfreq user device. (parent of devfreq)
1051 * @devfreq:    The devfreq object.
1052 */
1053int devfreq_register_opp_notifier(struct device *dev, struct devfreq *devfreq)
1054{
1055        struct srcu_notifier_head *nh;
1056        int ret = 0;
1057
1058        rcu_read_lock();
1059        nh = opp_get_notifier(dev);
1060        if (IS_ERR(nh))
1061                ret = PTR_ERR(nh);
1062        rcu_read_unlock();
1063        if (!ret)
1064                ret = srcu_notifier_chain_register(nh, &devfreq->nb);
1065
1066        return ret;
1067}
1068
1069/**
1070 * devfreq_unregister_opp_notifier() - Helper function to stop devfreq from
1071 *                                   being notified of any changes in the
1072 *                                   availability of the device's OPPs.
1073 * @dev:        The devfreq user device. (parent of devfreq)
1074 * @devfreq:    The devfreq object.
1075 *
1076 * This must be called from the exit() callback of devfreq_dev_profile if
1077 * devfreq_recommended_opp() is used.
1078 */
1079int devfreq_unregister_opp_notifier(struct device *dev, struct devfreq *devfreq)
1080{
1081        struct srcu_notifier_head *nh;
1082        int ret = 0;
1083
1084        rcu_read_lock();
1085        nh = opp_get_notifier(dev);
1086        if (IS_ERR(nh))
1087                ret = PTR_ERR(nh);
1088        rcu_read_unlock();
1089        if (!ret)
1090                ret = srcu_notifier_chain_unregister(nh, &devfreq->nb);
1091
1092        return ret;
1093}
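
    /*
     * Example (illustrative sketch): the OPP notifier is usually registered
     * right after devfreq_add_device() succeeds and unregistered from the
     * profile's exit() callback, so the devfreq instance follows OPP
     * enable/disable events for its whole lifetime. "foo_" names and the
     * drvdata layout are assumptions.
     *
     *        In foo_probe(), after devfreq_add_device() succeeds:
     *                err = devfreq_register_opp_notifier(&pdev->dev, df);
     *
     *        static void foo_profile_exit(struct device *dev)
     *        {
     *                struct devfreq *df = dev_get_drvdata(dev);
     *
     *                devfreq_unregister_opp_notifier(dev, df);
     *        }
     */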
1094
1095MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1096MODULE_DESCRIPTION("devfreq class support");
1097MODULE_LICENSE("GPL");
1098