linux/drivers/base/power/opp/core.c
   1/*
   2 * Generic OPP Interface
   3 *
   4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
   5 *      Nishanth Menon
   6 *      Romit Dasgupta
   7 *      Kevin Hilman
   8 *
   9 * This program is free software; you can redistribute it and/or modify
  10 * it under the terms of the GNU General Public License version 2 as
  11 * published by the Free Software Foundation.
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/clk.h>
  17#include <linux/errno.h>
  18#include <linux/err.h>
  19#include <linux/slab.h>
  20#include <linux/device.h>
  21#include <linux/export.h>
  22#include <linux/regulator/consumer.h>
  23
  24#include "opp.h"
  25
  26/*
  27 * The root of the list of all opp-tables. All opp_table structures branch off
  28 * from here, with each opp_table containing the list of opps it supports in
  29 * various states of availability.
  30 */
  31LIST_HEAD(opp_tables);
  32/* Lock to allow exclusive modification to the device and opp lists */
  33DEFINE_MUTEX(opp_table_lock);
  34
  35#define opp_rcu_lockdep_assert()                                        \
  36do {                                                                    \
  37        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
  38                         !lockdep_is_held(&opp_table_lock),             \
  39                         "Missing rcu_read_lock() or "                  \
  40                         "opp_table_lock protection");                  \
  41} while (0)
  42
  43static struct opp_device *_find_opp_dev(const struct device *dev,
  44                                        struct opp_table *opp_table)
  45{
  46        struct opp_device *opp_dev;
  47
  48        list_for_each_entry(opp_dev, &opp_table->dev_list, node)
  49                if (opp_dev->dev == dev)
  50                        return opp_dev;
  51
  52        return NULL;
  53}
  54
  55/**
  56 * _find_opp_table() - find opp_table struct using device pointer
  57 * @dev:        device pointer used to lookup OPP table
  58 *
  59 * Search the list of OPP tables for one containing a matching device. Does a
  60 * RCU reader operation to grab the pointer needed.
  61 *
  62 * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or
  63 * -EINVAL based on type of error.
  64 *
  65 * Locking: For readers, this function must be called under rcu_read_lock().
  66 * opp_table is a RCU protected pointer, which means that opp_table is valid
  67 * as long as we are under RCU lock.
  68 *
  69 * For Writers, this function must be called with opp_table_lock held.
  70 */
  71struct opp_table *_find_opp_table(struct device *dev)
  72{
  73        struct opp_table *opp_table;
  74
  75        opp_rcu_lockdep_assert();
  76
  77        if (IS_ERR_OR_NULL(dev)) {
  78                pr_err("%s: Invalid parameters\n", __func__);
  79                return ERR_PTR(-EINVAL);
  80        }
  81
  82        list_for_each_entry_rcu(opp_table, &opp_tables, node)
  83                if (_find_opp_dev(dev, opp_table))
  84                        return opp_table;
  85
  86        return ERR_PTR(-ENODEV);
  87}
  88
  89/**
  90 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an opp
  91 * @opp:        opp for which the voltage has to be returned
  92 *
  93 * Return: voltage in micro volt corresponding to the opp, else
  94 * return 0
  95 *
  96 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
  97 * protected pointer. This means that opp which could have been fetched by
  98 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
  99 * under RCU lock. The pointer returned by the opp_find_freq family must be
 100 * used in the same RCU read-side critical section as this function, i.e.
 101 * prior to unlocking with rcu_read_unlock(), to maintain the integrity of the
 102 * pointer.
 103 */
 104unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
 105{
 106        struct dev_pm_opp *tmp_opp;
 107        unsigned long v = 0;
 108
 109        opp_rcu_lockdep_assert();
 110
 111        tmp_opp = rcu_dereference(opp);
 112        if (IS_ERR_OR_NULL(tmp_opp))
 113                pr_err("%s: Invalid parameters\n", __func__);
 114        else
 115                v = tmp_opp->u_volt;
 116
 117        return v;
 118}
 119EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
 120
 121/**
 122 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
 123 * @opp:        opp for which the frequency has to be returned
 124 *
 125 * Return: frequency in hertz corresponding to the opp, else
 126 * return 0
 127 *
 128 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 129 * protected pointer. This means that opp which could have been fetched by
 130 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 131 * under RCU lock. The pointer returned by the opp_find_freq family must be
 132 * used in the same RCU read-side critical section as this function, i.e.
 133 * prior to unlocking with rcu_read_unlock(), to maintain the integrity of the
 134 * pointer.
 135 */
 136unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
 137{
 138        struct dev_pm_opp *tmp_opp;
 139        unsigned long f = 0;
 140
 141        opp_rcu_lockdep_assert();
 142
 143        tmp_opp = rcu_dereference(opp);
 144        if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available)
 145                pr_err("%s: Invalid parameters\n", __func__);
 146        else
 147                f = tmp_opp->rate;
 148
 149        return f;
 150}
 151EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
 152
 153/**
 154 * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not
 155 * @opp: opp for which turbo mode is being verified
 156 *
 157 * Turbo OPPs are not for normal use, and can be enabled (under certain
 158 * conditions) for short durations of time to finish high-throughput work
 159 * quickly. Running on them for longer times may overheat the chip.
 160 *
 161 * Return: true if opp is turbo opp, else false.
 162 *
 163 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 164 * protected pointer. This means that opp which could have been fetched by
 165 * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 166 * under RCU lock. The pointer returned by the opp_find_freq family must be
 167 * used in the same RCU read-side critical section as this function, i.e.
 168 * prior to unlocking with rcu_read_unlock(), to maintain the integrity of the
 169 * pointer.
 170 */
 171bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp)
 172{
 173        struct dev_pm_opp *tmp_opp;
 174
 175        opp_rcu_lockdep_assert();
 176
 177        tmp_opp = rcu_dereference(opp);
 178        if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) {
 179                pr_err("%s: Invalid parameters\n", __func__);
 180                return false;
 181        }
 182
 183        return tmp_opp->turbo;
 184}
 185EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo);
 186
 187/**
 188 * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds
 189 * @dev:        device for which we do this operation
 190 *
 191 * Return: This function returns the max clock latency in nanoseconds.
 192 *
 193 * Locking: This function takes rcu_read_lock().
 194 */
 195unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev)
 196{
 197        struct opp_table *opp_table;
 198        unsigned long clock_latency_ns;
 199
 200        rcu_read_lock();
 201
 202        opp_table = _find_opp_table(dev);
 203        if (IS_ERR(opp_table))
 204                clock_latency_ns = 0;
 205        else
 206                clock_latency_ns = opp_table->clock_latency_ns_max;
 207
 208        rcu_read_unlock();
 209        return clock_latency_ns;
 210}
 211EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency);
 212
 213/**
 214 * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds
 215 * @dev: device for which we do this operation
 216 *
 217 * Return: This function returns the max voltage latency in nanoseconds.
 218 *
 219 * Locking: This function takes rcu_read_lock().
 220 */
 221unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
 222{
 223        struct opp_table *opp_table;
 224        struct dev_pm_opp *opp;
 225        struct regulator *reg;
 226        unsigned long latency_ns = 0;
 227        unsigned long min_uV = ~0, max_uV = 0;
 228        int ret;
 229
 230        rcu_read_lock();
 231
 232        opp_table = _find_opp_table(dev);
 233        if (IS_ERR(opp_table)) {
 234                rcu_read_unlock();
 235                return 0;
 236        }
 237
 238        reg = opp_table->regulator;
 239        if (IS_ERR(reg)) {
 240                /* Regulator may not be required for device */
 241                rcu_read_unlock();
 242                return 0;
 243        }
 244
 245        list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
 246                if (!opp->available)
 247                        continue;
 248
 249                if (opp->u_volt_min < min_uV)
 250                        min_uV = opp->u_volt_min;
 251                if (opp->u_volt_max > max_uV)
 252                        max_uV = opp->u_volt_max;
 253        }
 254
 255        rcu_read_unlock();
 256
 257        /*
 258         * The caller needs to ensure that opp_table (and hence the regulator)
 259         * isn't freed, while we are executing this routine.
 260         */
 261        ret = regulator_set_voltage_time(reg, min_uV, max_uV);
 262        if (ret > 0)
 263                latency_ns = ret * 1000;
 264
 265        return latency_ns;
 266}
 267EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency);
 268
 269/**
 270 * dev_pm_opp_get_max_transition_latency() - Get max transition latency in
 271 *                                           nanoseconds
 272 * @dev: device for which we do this operation
 273 *
 274 * Return: This function returns the max transition latency, in nanoseconds, to
 275 * switch from one OPP to other.
 276 *
 277 * Locking: This function takes rcu_read_lock().
 278 */
 279unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev)
 280{
 281        return dev_pm_opp_get_max_volt_latency(dev) +
 282                dev_pm_opp_get_max_clock_latency(dev);
 283}
 284EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency);
 285
 286/**
 287 * dev_pm_opp_get_suspend_opp() - Get suspend opp
 288 * @dev:        device for which we do this operation
 289 *
 290 * Return: This function returns pointer to the suspend opp if it is
 291 * defined and available, otherwise it returns NULL.
 292 *
 293 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 294 * protected pointer. The reason for this is that the opp pointer which is
 295 * returned will remain valid for use with opp_get_{voltage, freq} only while
 296 * under the locked area. The pointer returned must be used prior to unlocking
 297 * with rcu_read_unlock() to maintain the integrity of the pointer.
 298 */
 299struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
 300{
 301        struct opp_table *opp_table;
 302
 303        opp_rcu_lockdep_assert();
 304
 305        opp_table = _find_opp_table(dev);
 306        if (IS_ERR(opp_table) || !opp_table->suspend_opp ||
 307            !opp_table->suspend_opp->available)
 308                return NULL;
 309
 310        return opp_table->suspend_opp;
 311}
 312EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp);
 313
 314/**
 315 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table
 316 * @dev:        device for which we do this operation
 317 *
 318 * Return: This function returns the number of available opps if there are any,
 319 * 0 if there are none, or the corresponding error value if the table is missing.
 320 *
 321 * Locking: This function takes rcu_read_lock().
 322 */
 323int dev_pm_opp_get_opp_count(struct device *dev)
 324{
 325        struct opp_table *opp_table;
 326        struct dev_pm_opp *temp_opp;
 327        int count = 0;
 328
 329        rcu_read_lock();
 330
 331        opp_table = _find_opp_table(dev);
 332        if (IS_ERR(opp_table)) {
 333                count = PTR_ERR(opp_table);
 334                dev_err(dev, "%s: OPP table not found (%d)\n",
 335                        __func__, count);
 336                goto out_unlock;
 337        }
 338
 339        list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 340                if (temp_opp->available)
 341                        count++;
 342        }
 343
 344out_unlock:
 345        rcu_read_unlock();
 346        return count;
 347}
 348EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
 349
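/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a driver sizing a frequency table from the OPP count. The names
 * my_init_freq_table() and my_freq_table are hypothetical.
 *
 *	static int my_init_freq_table(struct device *dev)
 *	{
 *		unsigned long *my_freq_table;
 *		int count;
 *
 *		count = dev_pm_opp_get_opp_count(dev);
 *		if (count <= 0)
 *			return count ? count : -ENODEV;
 *
 *		my_freq_table = kcalloc(count, sizeof(*my_freq_table),
 *					GFP_KERNEL);
 *		if (!my_freq_table)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */
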
 350/**
 351 * dev_pm_opp_find_freq_exact() - search for an exact frequency
 352 * @dev:                device for which we do this operation
 353 * @freq:               frequency to search for
 354 * @available:          true/false - match for available opp
 355 *
 356 * Return: Searches for exact match in the opp table and returns pointer to the
 357 * matching opp if found, else returns ERR_PTR in case of error and should
 358 * be handled using IS_ERR. Error return values can be:
 359 * EINVAL:      for bad pointer
 360 * ERANGE:      no match found for search
 361 * ENODEV:      if device not found in list of registered devices
 362 *
 363 * Note: available is a modifier for the search. If available == true, then the
 364 * match is for the exact matching frequency which is available in the stored OPP
 365 * table. If false, the match is for the exact frequency which is not available.
 366 *
 367 * This provides a mechanism to enable an opp which is not available currently
 368 * or the opposite as well.
 369 *
 370 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 371 * protected pointer. The reason for this is that the opp pointer which is
 372 * returned will remain valid for use with opp_get_{voltage, freq} only while
 373 * under the locked area. The pointer returned must be used prior to unlocking
 374 * with rcu_read_unlock() to maintain the integrity of the pointer.
 375 */
 376struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 377                                              unsigned long freq,
 378                                              bool available)
 379{
 380        struct opp_table *opp_table;
 381        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 382
 383        opp_rcu_lockdep_assert();
 384
 385        opp_table = _find_opp_table(dev);
 386        if (IS_ERR(opp_table)) {
 387                int r = PTR_ERR(opp_table);
 388
 389                dev_err(dev, "%s: OPP table not found (%d)\n", __func__, r);
 390                return ERR_PTR(r);
 391        }
 392
 393        list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 394                if (temp_opp->available == available &&
 395                                temp_opp->rate == freq) {
 396                        opp = temp_opp;
 397                        break;
 398                }
 399        }
 400
 401        return opp;
 402}
 403EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
 404
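/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): checking whether an exact, currently unavailable OPP exists before
 * re-enabling it. The 1200000000 Hz value is hypothetical, and the enable
 * call is made outside the RCU read-side section since it may block.
 *
 *	struct dev_pm_opp *opp;
 *	bool found;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_exact(dev, 1200000000, false);
 *	found = !IS_ERR(opp);
 *	rcu_read_unlock();
 *
 *	if (found)
 *		dev_pm_opp_enable(dev, 1200000000);
 */
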
 405static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table,
 406                                                   unsigned long *freq)
 407{
 408        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 409
 410        list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 411                if (temp_opp->available && temp_opp->rate >= *freq) {
 412                        opp = temp_opp;
 413                        *freq = opp->rate;
 414                        break;
 415                }
 416        }
 417
 418        return opp;
 419}
 420
 421/**
 422 * dev_pm_opp_find_freq_ceil() - Search for a rounded ceil freq
 423 * @dev:        device for which we do this operation
 424 * @freq:       Start frequency
 425 *
 426 * Search for the matching ceil *available* OPP from a starting freq
 427 * for a device.
 428 *
 429 * Return: matching *opp and refreshes *freq accordingly, else returns
 430 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 431 * values can be:
 432 * EINVAL:      for bad pointer
 433 * ERANGE:      no match found for search
 434 * ENODEV:      if device not found in list of registered devices
 435 *
 436 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 437 * protected pointer. The reason for this is that the opp pointer which is
 438 * returned will remain valid for use with opp_get_{voltage, freq} only while
 439 * under the locked area. The pointer returned must be used prior to unlocking
 440 * with rcu_read_unlock() to maintain the integrity of the pointer.
 441 */
 442struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 443                                             unsigned long *freq)
 444{
 445        struct opp_table *opp_table;
 446
 447        opp_rcu_lockdep_assert();
 448
 449        if (!dev || !freq) {
 450                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 451                return ERR_PTR(-EINVAL);
 452        }
 453
 454        opp_table = _find_opp_table(dev);
 455        if (IS_ERR(opp_table))
 456                return ERR_CAST(opp_table);
 457
 458        return _find_freq_ceil(opp_table, freq);
 459}
 460EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
 461
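/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): the usual pattern of looking up the OPP for a requested frequency
 * and reading its voltage while still inside the RCU read-side critical
 * section. 'dev' and 'target_freq' are assumed to be provided by the caller.
 *
 *	unsigned long freq = target_freq, volt;
 *	struct dev_pm_opp *opp;
 *
 *	rcu_read_lock();
 *	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = dev_pm_opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * On return, 'freq' holds the rounded-up OPP frequency and 'volt' its nominal
 * voltage. The 'opp' pointer itself must not be dereferenced after
 * rcu_read_unlock(), but the copied 'freq' and 'volt' values remain valid.
 */
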
 462/**
 463 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
 464 * @dev:        device for which we do this operation
 465 * @freq:       Start frequency
 466 *
 467 * Search for the matching floor *available* OPP from a starting freq
 468 * for a device.
 469 *
 470 * Return: matching *opp and refreshes *freq accordingly, else returns
 471 * ERR_PTR in case of error and should be handled using IS_ERR. Error return
 472 * values can be:
 473 * EINVAL:      for bad pointer
 474 * ERANGE:      no match found for search
 475 * ENODEV:      if device not found in list of registered devices
 476 *
 477 * Locking: This function must be called under rcu_read_lock(). opp is a rcu
 478 * protected pointer. The reason for this is that the opp pointer which is
 479 * returned will remain valid for use with opp_get_{voltage, freq} only while
 480 * under the locked area. The pointer returned must be used prior to unlocking
 481 * with rcu_read_unlock() to maintain the integrity of the pointer.
 482 */
 483struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 484                                              unsigned long *freq)
 485{
 486        struct opp_table *opp_table;
 487        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 488
 489        opp_rcu_lockdep_assert();
 490
 491        if (!dev || !freq) {
 492                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 493                return ERR_PTR(-EINVAL);
 494        }
 495
 496        opp_table = _find_opp_table(dev);
 497        if (IS_ERR(opp_table))
 498                return ERR_CAST(opp_table);
 499
 500        list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) {
 501                if (temp_opp->available) {
 502                        /* go to the next node, before choosing prev */
 503                        if (temp_opp->rate > *freq)
 504                                break;
 505                        else
 506                                opp = temp_opp;
 507                }
 508        }
 509        if (!IS_ERR(opp))
 510                *freq = opp->rate;
 511
 512        return opp;
 513}
 514EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 515
 516/*
 517 * The caller needs to ensure that opp_table (and hence the clk) isn't freed,
 518 * while clk returned here is used.
 519 */
 520static struct clk *_get_opp_clk(struct device *dev)
 521{
 522        struct opp_table *opp_table;
 523        struct clk *clk;
 524
 525        rcu_read_lock();
 526
 527        opp_table = _find_opp_table(dev);
 528        if (IS_ERR(opp_table)) {
 529                dev_err(dev, "%s: device opp doesn't exist\n", __func__);
 530                clk = ERR_CAST(opp_table);
 531                goto unlock;
 532        }
 533
 534        clk = opp_table->clk;
 535        if (IS_ERR(clk))
 536                dev_err(dev, "%s: No clock available for the device\n",
 537                        __func__);
 538
 539unlock:
 540        rcu_read_unlock();
 541        return clk;
 542}
 543
 544static int _set_opp_voltage(struct device *dev, struct regulator *reg,
 545                            unsigned long u_volt, unsigned long u_volt_min,
 546                            unsigned long u_volt_max)
 547{
 548        int ret;
 549
 550        /* Regulator not available for device */
 551        if (IS_ERR(reg)) {
 552                dev_dbg(dev, "%s: regulator not available: %ld\n", __func__,
 553                        PTR_ERR(reg));
 554                return 0;
 555        }
 556
 557        dev_dbg(dev, "%s: voltages (mV): %lu %lu %lu\n", __func__, u_volt_min,
 558                u_volt, u_volt_max);
 559
 560        ret = regulator_set_voltage_triplet(reg, u_volt_min, u_volt,
 561                                            u_volt_max);
 562        if (ret)
 563                dev_err(dev, "%s: failed to set voltage (%lu %lu %lu mV): %d\n",
 564                        __func__, u_volt_min, u_volt, u_volt_max, ret);
 565
 566        return ret;
 567}
 568
 569/**
 570 * dev_pm_opp_set_rate() - Configure new OPP based on frequency
 571 * @dev:         device for which we do this operation
 572 * @target_freq: frequency to achieve
 573 *
 574 * This configures the power-supplies and clock source to the levels specified
 575 * by the OPP corresponding to the target_freq.
 576 *
 577 * Locking: This function takes rcu_read_lock().
 578 */
 579int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
 580{
 581        struct opp_table *opp_table;
 582        struct dev_pm_opp *old_opp, *opp;
 583        struct regulator *reg;
 584        struct clk *clk;
 585        unsigned long freq, old_freq;
 586        unsigned long u_volt, u_volt_min, u_volt_max;
 587        unsigned long ou_volt, ou_volt_min, ou_volt_max;
 588        int ret;
 589
 590        if (unlikely(!target_freq)) {
 591                dev_err(dev, "%s: Invalid target frequency %lu\n", __func__,
 592                        target_freq);
 593                return -EINVAL;
 594        }
 595
 596        clk = _get_opp_clk(dev);
 597        if (IS_ERR(clk))
 598                return PTR_ERR(clk);
 599
 600        freq = clk_round_rate(clk, target_freq);
 601        if ((long)freq <= 0)
 602                freq = target_freq;
 603
 604        old_freq = clk_get_rate(clk);
 605
 606        /* Return early if nothing to do */
 607        if (old_freq == freq) {
 608                dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
 609                        __func__, freq);
 610                return 0;
 611        }
 612
 613        rcu_read_lock();
 614
 615        opp_table = _find_opp_table(dev);
 616        if (IS_ERR(opp_table)) {
 617                dev_err(dev, "%s: device opp doesn't exist\n", __func__);
 618                rcu_read_unlock();
 619                return PTR_ERR(opp_table);
 620        }
 621
 622        old_opp = _find_freq_ceil(opp_table, &old_freq);
 623        if (!IS_ERR(old_opp)) {
 624                ou_volt = old_opp->u_volt;
 625                ou_volt_min = old_opp->u_volt_min;
 626                ou_volt_max = old_opp->u_volt_max;
 627        } else {
 628                dev_err(dev, "%s: failed to find current OPP for freq %lu (%ld)\n",
 629                        __func__, old_freq, PTR_ERR(old_opp));
 630        }
 631
 632        opp = _find_freq_ceil(opp_table, &freq);
 633        if (IS_ERR(opp)) {
 634                ret = PTR_ERR(opp);
 635                dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n",
 636                        __func__, freq, ret);
 637                rcu_read_unlock();
 638                return ret;
 639        }
 640
 641        u_volt = opp->u_volt;
 642        u_volt_min = opp->u_volt_min;
 643        u_volt_max = opp->u_volt_max;
 644
 645        reg = opp_table->regulator;
 646
 647        rcu_read_unlock();
 648
 649        /* Scaling up? Scale voltage before frequency */
 650        if (freq > old_freq) {
 651                ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
 652                                       u_volt_max);
 653                if (ret)
 654                        goto restore_voltage;
 655        }
 656
 657        /* Change frequency */
 658
 659        dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n",
 660                __func__, old_freq, freq);
 661
 662        ret = clk_set_rate(clk, freq);
 663        if (ret) {
 664                dev_err(dev, "%s: failed to set clock rate: %d\n", __func__,
 665                        ret);
 666                goto restore_voltage;
 667        }
 668
 669        /* Scaling down? Scale voltage after frequency */
 670        if (freq < old_freq) {
 671                ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min,
 672                                       u_volt_max);
 673                if (ret)
 674                        goto restore_freq;
 675        }
 676
 677        return 0;
 678
 679restore_freq:
 680        if (clk_set_rate(clk, old_freq))
 681                dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n",
 682                        __func__, old_freq);
 683restore_voltage:
 684        /* This shouldn't harm even if the voltages weren't updated earlier */
 685        if (!IS_ERR(old_opp))
 686                _set_opp_voltage(dev, reg, ou_volt, ou_volt_min, ou_volt_max);
 687
 688        return ret;
 689}
 690EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate);
 691
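/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a clock-scaling driver can hand the requested frequency straight to
 * the OPP core; my_target() is a hypothetical callback.
 *
 *	static int my_target(struct device *dev, unsigned long target_freq)
 *	{
 *		return dev_pm_opp_set_rate(dev, target_freq);
 *	}
 *
 * dev_pm_opp_set_rate() picks the matching OPP for the rounded clock rate,
 * orders the regulator and clock updates depending on whether the frequency
 * goes up or down, and rolls back on failure, so the caller only has to
 * supply the target frequency.
 */
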
 692/* OPP-dev Helpers */
 693static void _kfree_opp_dev_rcu(struct rcu_head *head)
 694{
 695        struct opp_device *opp_dev;
 696
 697        opp_dev = container_of(head, struct opp_device, rcu_head);
 698        kfree_rcu(opp_dev, rcu_head);
 699}
 700
 701static void _remove_opp_dev(struct opp_device *opp_dev,
 702                            struct opp_table *opp_table)
 703{
 704        opp_debug_unregister(opp_dev, opp_table);
 705        list_del(&opp_dev->node);
 706        call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head,
 707                  _kfree_opp_dev_rcu);
 708}
 709
 710struct opp_device *_add_opp_dev(const struct device *dev,
 711                                struct opp_table *opp_table)
 712{
 713        struct opp_device *opp_dev;
 714        int ret;
 715
 716        opp_dev = kzalloc(sizeof(*opp_dev), GFP_KERNEL);
 717        if (!opp_dev)
 718                return NULL;
 719
 720        /* Initialize opp-dev */
 721        opp_dev->dev = dev;
 722        list_add_rcu(&opp_dev->node, &opp_table->dev_list);
 723
 724        /* Create debugfs entries for the opp_table */
 725        ret = opp_debug_register(opp_dev, opp_table);
 726        if (ret)
 727                dev_err(dev, "%s: Failed to register opp debugfs (%d)\n",
 728                        __func__, ret);
 729
 730        return opp_dev;
 731}
 732
 733/**
 734 * _add_opp_table() - Find OPP table or allocate a new one
 735 * @dev:        device for which we do this operation
 736 *
 737 * It tries to find an existing table first; if it can't find one, it
 738 * allocates a new OPP table and returns that.
 739 *
 740 * Return: valid opp_table pointer if success, else NULL.
 741 */
 742static struct opp_table *_add_opp_table(struct device *dev)
 743{
 744        struct opp_table *opp_table;
 745        struct opp_device *opp_dev;
 746        int ret;
 747
 748        /* Check for existing table for 'dev' first */
 749        opp_table = _find_opp_table(dev);
 750        if (!IS_ERR(opp_table))
 751                return opp_table;
 752
 753        /*
 754         * Allocate a new OPP table. In the infrequent case where a new
 755         * device needs to be added, we pay this penalty.
 756         */
 757        opp_table = kzalloc(sizeof(*opp_table), GFP_KERNEL);
 758        if (!opp_table)
 759                return NULL;
 760
 761        INIT_LIST_HEAD(&opp_table->dev_list);
 762
 763        opp_dev = _add_opp_dev(dev, opp_table);
 764        if (!opp_dev) {
 765                kfree(opp_table);
 766                return NULL;
 767        }
 768
 769        _of_init_opp_table(opp_table, dev);
 770
 771        /* Set regulator to a non-NULL error value */
 772        opp_table->regulator = ERR_PTR(-ENXIO);
 773
 774        /* Find clk for the device */
 775        opp_table->clk = clk_get(dev, NULL);
 776        if (IS_ERR(opp_table->clk)) {
 777                ret = PTR_ERR(opp_table->clk);
 778                if (ret != -EPROBE_DEFER)
 779                        dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__,
 780                                ret);
 781        }
 782
 783        srcu_init_notifier_head(&opp_table->srcu_head);
 784        INIT_LIST_HEAD(&opp_table->opp_list);
 785
 786        /* Secure the device table modification */
 787        list_add_rcu(&opp_table->node, &opp_tables);
 788        return opp_table;
 789}
 790
 791/**
 792 * _kfree_device_rcu() - Free opp_table RCU handler
 793 * @head:       RCU head
 794 */
 795static void _kfree_device_rcu(struct rcu_head *head)
 796{
 797        struct opp_table *opp_table = container_of(head, struct opp_table,
 798                                                   rcu_head);
 799
 800        kfree_rcu(opp_table, rcu_head);
 801}
 802
 803/**
 804 * _remove_opp_table() - Removes an OPP table
 805 * @opp_table: OPP table to be removed.
 806 *
 807 * Removes/frees OPP table if it doesn't contain any OPPs.
 808 */
 809static void _remove_opp_table(struct opp_table *opp_table)
 810{
 811        struct opp_device *opp_dev;
 812
 813        if (!list_empty(&opp_table->opp_list))
 814                return;
 815
 816        if (opp_table->supported_hw)
 817                return;
 818
 819        if (opp_table->prop_name)
 820                return;
 821
 822        if (!IS_ERR(opp_table->regulator))
 823                return;
 824
 825        /* Release clk */
 826        if (!IS_ERR(opp_table->clk))
 827                clk_put(opp_table->clk);
 828
 829        opp_dev = list_first_entry(&opp_table->dev_list, struct opp_device,
 830                                   node);
 831
 832        _remove_opp_dev(opp_dev, opp_table);
 833
 834        /* dev_list must be empty now */
 835        WARN_ON(!list_empty(&opp_table->dev_list));
 836
 837        list_del_rcu(&opp_table->node);
 838        call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head,
 839                  _kfree_device_rcu);
 840}
 841
 842/**
 843 * _kfree_opp_rcu() - Free OPP RCU handler
 844 * @head:       RCU head
 845 */
 846static void _kfree_opp_rcu(struct rcu_head *head)
 847{
 848        struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head);
 849
 850        kfree_rcu(opp, rcu_head);
 851}
 852
 853/**
 854 * _opp_remove()  - Remove an OPP from a table definition
 855 * @opp_table:  points back to the opp_table struct this opp belongs to
 856 * @opp:        pointer to the OPP to remove
 857 * @notify:     OPP_EVENT_REMOVE notification should be sent or not
 858 *
 859 * This function removes an opp definition from the opp table.
 860 *
 861 * Locking: The internal opp_table and opp structures are RCU protected.
 862 * It is assumed that the caller holds required mutex for an RCU updater
 863 * strategy.
 864 */
 865void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
 866                 bool notify)
 867{
 868        /*
 869         * Notify the changes in the availability of the operable
 870         * frequency/voltage list.
 871         */
 872        if (notify)
 873                srcu_notifier_call_chain(&opp_table->srcu_head,
 874                                         OPP_EVENT_REMOVE, opp);
 875        opp_debug_remove_one(opp);
 876        list_del_rcu(&opp->node);
 877        call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
 878
 879        _remove_opp_table(opp_table);
 880}
 881
 882/**
 883 * dev_pm_opp_remove()  - Remove an OPP from OPP table
 884 * @dev:        device for which we do this operation
 885 * @freq:       OPP to remove with matching 'freq'
 886 *
 887 * This function removes an opp from the opp table.
 888 *
 889 * Locking: The internal opp_table and opp structures are RCU protected.
 890 * Hence this function internally uses RCU updater strategy with mutex locks
 891 * to keep the integrity of the internal data structures. Callers should ensure
 892 * that this function is *NOT* called under RCU protection or in contexts where
 893 * mutex cannot be locked.
 894 */
 895void dev_pm_opp_remove(struct device *dev, unsigned long freq)
 896{
 897        struct dev_pm_opp *opp;
 898        struct opp_table *opp_table;
 899        bool found = false;
 900
 901        /* Hold our table modification lock here */
 902        mutex_lock(&opp_table_lock);
 903
 904        opp_table = _find_opp_table(dev);
 905        if (IS_ERR(opp_table))
 906                goto unlock;
 907
 908        list_for_each_entry(opp, &opp_table->opp_list, node) {
 909                if (opp->rate == freq) {
 910                        found = true;
 911                        break;
 912                }
 913        }
 914
 915        if (!found) {
 916                dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n",
 917                         __func__, freq);
 918                goto unlock;
 919        }
 920
 921        _opp_remove(opp_table, opp, true);
 922unlock:
 923        mutex_unlock(&opp_table_lock);
 924}
 925EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 926
 927struct dev_pm_opp *_allocate_opp(struct device *dev,
 928                                 struct opp_table **opp_table)
 929{
 930        struct dev_pm_opp *opp;
 931
 932        /* allocate new OPP node */
 933        opp = kzalloc(sizeof(*opp), GFP_KERNEL);
 934        if (!opp)
 935                return NULL;
 936
 937        INIT_LIST_HEAD(&opp->node);
 938
 939        *opp_table = _add_opp_table(dev);
 940        if (!*opp_table) {
 941                kfree(opp);
 942                return NULL;
 943        }
 944
 945        return opp;
 946}
 947
 948static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
 949                                         struct opp_table *opp_table)
 950{
 951        struct regulator *reg = opp_table->regulator;
 952
 953        if (!IS_ERR(reg) &&
 954            !regulator_is_supported_voltage(reg, opp->u_volt_min,
 955                                            opp->u_volt_max)) {
 956                pr_warn("%s: OPP minuV: %lu maxuV: %lu, not supported by regulator\n",
 957                        __func__, opp->u_volt_min, opp->u_volt_max);
 958                return false;
 959        }
 960
 961        return true;
 962}
 963
 964int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
 965             struct opp_table *opp_table)
 966{
 967        struct dev_pm_opp *opp;
 968        struct list_head *head = &opp_table->opp_list;
 969        int ret;
 970
 971        /*
 972         * Insert new OPP in order of increasing frequency and discard if
 973         * already present.
 974         *
 975         * Need to use &opp_table->opp_list in the condition part of the 'for'
 976         * loop, don't replace it with head otherwise it will become an infinite
 977         * loop.
 978         */
 979        list_for_each_entry_rcu(opp, &opp_table->opp_list, node) {
 980                if (new_opp->rate > opp->rate) {
 981                        head = &opp->node;
 982                        continue;
 983                }
 984
 985                if (new_opp->rate < opp->rate)
 986                        break;
 987
 988                /* Duplicate OPPs */
 989                dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
 990                         __func__, opp->rate, opp->u_volt, opp->available,
 991                         new_opp->rate, new_opp->u_volt, new_opp->available);
 992
 993                return opp->available && new_opp->u_volt == opp->u_volt ?
 994                        0 : -EEXIST;
 995        }
 996
 997        new_opp->opp_table = opp_table;
 998        list_add_rcu(&new_opp->node, head);
 999
1000        ret = opp_debug_create_one(new_opp, opp_table);
1001        if (ret)
1002                dev_err(dev, "%s: Failed to register opp to debugfs (%d)\n",
1003                        __func__, ret);
1004
1005        if (!_opp_supported_by_regulators(new_opp, opp_table)) {
1006                new_opp->available = false;
1007                dev_warn(dev, "%s: OPP not supported by regulators (%lu)\n",
1008                         __func__, new_opp->rate);
1009        }
1010
1011        return 0;
1012}
1013
1014/**
1015 * _opp_add_v1() - Allocate an OPP based on v1 bindings.
1016 * @dev:        device for which we do this operation
1017 * @freq:       Frequency in Hz for this OPP
1018 * @u_volt:     Voltage in uVolts for this OPP
1019 * @dynamic:    Dynamically added OPPs.
1020 *
1021 * This function adds an opp definition to the opp table and returns status.
1022 * The opp is made available by default and it can be controlled using
1023 * dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
1024 *
1025 * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
1026 * and freed by dev_pm_opp_of_remove_table.
1027 *
1028 * Locking: The internal opp_table and opp structures are RCU protected.
1029 * Hence this function internally uses RCU updater strategy with mutex locks
1030 * to keep the integrity of the internal data structures. Callers should ensure
1031 * that this function is *NOT* called under RCU protection or in contexts where
1032 * mutex cannot be locked.
1033 *
1034 * Return:
1035 * 0            On success OR
1036 *              Duplicate OPPs (both freq and volt are same) and opp->available
1037 * -EEXIST      Freqs are the same but volts are different OR
1038 *              Duplicate OPPs (both freq and volt are same) and !opp->available
1039 * -ENOMEM      Memory allocation failure
1040 */
1041int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
1042                bool dynamic)
1043{
1044        struct opp_table *opp_table;
1045        struct dev_pm_opp *new_opp;
1046        unsigned long tol;
1047        int ret;
1048
1049        /* Hold our table modification lock here */
1050        mutex_lock(&opp_table_lock);
1051
1052        new_opp = _allocate_opp(dev, &opp_table);
1053        if (!new_opp) {
1054                ret = -ENOMEM;
1055                goto unlock;
1056        }
1057
1058        /* populate the opp table */
1059        new_opp->rate = freq;
1060        tol = u_volt * opp_table->voltage_tolerance_v1 / 100;
1061        new_opp->u_volt = u_volt;
1062        new_opp->u_volt_min = u_volt - tol;
1063        new_opp->u_volt_max = u_volt + tol;
1064        new_opp->available = true;
1065        new_opp->dynamic = dynamic;
1066
1067        ret = _opp_add(dev, new_opp, opp_table);
1068        if (ret)
1069                goto free_opp;
1070
1071        mutex_unlock(&opp_table_lock);
1072
1073        /*
1074         * Notify the changes in the availability of the operable
1075         * frequency/voltage list.
1076         */
1077        srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1078        return 0;
1079
1080free_opp:
1081        _opp_remove(opp_table, new_opp, false);
1082unlock:
1083        mutex_unlock(&opp_table_lock);
1084        return ret;
1085}
1086
1087/**
1088 * dev_pm_opp_set_supported_hw() - Set supported platforms
1089 * @dev: Device for which supported-hw has to be set.
1090 * @versions: Array of hierarchy of versions to match.
1091 * @count: Number of elements in the array.
1092 *
1093 * This is required only for the V2 bindings, and it enables a platform to
1094 * specify the hierarchy of versions it supports. The OPP layer will then enable
1095 * only those OPPs that are available for these versions, based on the
1096 * 'opp-supported-hw' property of each OPP node.
1097 *
1098 * Locking: The internal opp_table and opp structures are RCU protected.
1099 * Hence this function internally uses RCU updater strategy with mutex locks
1100 * to keep the integrity of the internal data structures. Callers should ensure
1101 * that this function is *NOT* called under RCU protection or in contexts where
1102 * mutex cannot be locked.
1103 */
1104int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions,
1105                                unsigned int count)
1106{
1107        struct opp_table *opp_table;
1108        int ret = 0;
1109
1110        /* Hold our table modification lock here */
1111        mutex_lock(&opp_table_lock);
1112
1113        opp_table = _add_opp_table(dev);
1114        if (!opp_table) {
1115                ret = -ENOMEM;
1116                goto unlock;
1117        }
1118
1119        /* Make sure there are no concurrent readers while updating opp_table */
1120        WARN_ON(!list_empty(&opp_table->opp_list));
1121
1122        /* Do we already have a version hierarchy associated with opp_table? */
1123        if (opp_table->supported_hw) {
1124                dev_err(dev, "%s: Already have supported hardware list\n",
1125                        __func__);
1126                ret = -EBUSY;
1127                goto err;
1128        }
1129
1130        opp_table->supported_hw = kmemdup(versions, count * sizeof(*versions),
1131                                        GFP_KERNEL);
1132        if (!opp_table->supported_hw) {
1133                ret = -ENOMEM;
1134                goto err;
1135        }
1136
1137        opp_table->supported_hw_count = count;
1138        mutex_unlock(&opp_table_lock);
1139        return 0;
1140
1141err:
1142        _remove_opp_table(opp_table);
1143unlock:
1144        mutex_unlock(&opp_table_lock);
1145
1146        return ret;
1147}
1148EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw);
1149
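/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a platform driver advertising its hardware version hierarchy before
 * the OPP table is parsed from DT. The version values are hypothetical; each
 * OPP node's 'opp-supported-hw' property is matched against them.
 *
 *	static const u32 my_hw_versions[] = { 0x1, 0x4 };
 *	int ret;
 *
 *	ret = dev_pm_opp_set_supported_hw(dev, my_hw_versions,
 *					  ARRAY_SIZE(my_hw_versions));
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_supported_hw(dev);
 */
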
1150/**
1151 * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw
1152 * @dev: Device for which supported-hw has to be put.
1153 *
1154 * This is required only for the V2 bindings, and is called for a matching
1155 * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure
1156 * will not be freed.
1157 *
1158 * Locking: The internal opp_table and opp structures are RCU protected.
1159 * Hence this function internally uses RCU updater strategy with mutex locks
1160 * to keep the integrity of the internal data structures. Callers should ensure
1161 * that this function is *NOT* called under RCU protection or in contexts where
1162 * mutex cannot be locked.
1163 */
1164void dev_pm_opp_put_supported_hw(struct device *dev)
1165{
1166        struct opp_table *opp_table;
1167
1168        /* Hold our table modification lock here */
1169        mutex_lock(&opp_table_lock);
1170
1171        /* Check for existing table for 'dev' first */
1172        opp_table = _find_opp_table(dev);
1173        if (IS_ERR(opp_table)) {
1174                dev_err(dev, "Failed to find opp_table: %ld\n",
1175                        PTR_ERR(opp_table));
1176                goto unlock;
1177        }
1178
1179        /* Make sure there are no concurrent readers while updating opp_table */
1180        WARN_ON(!list_empty(&opp_table->opp_list));
1181
1182        if (!opp_table->supported_hw) {
1183                dev_err(dev, "%s: Doesn't have supported hardware list\n",
1184                        __func__);
1185                goto unlock;
1186        }
1187
1188        kfree(opp_table->supported_hw);
1189        opp_table->supported_hw = NULL;
1190        opp_table->supported_hw_count = 0;
1191
1192        /* Try freeing opp_table if this was the last blocking resource */
1193        _remove_opp_table(opp_table);
1194
1195unlock:
1196        mutex_unlock(&opp_table_lock);
1197}
1198EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw);
1199
1200/**
1201 * dev_pm_opp_set_prop_name() - Set prop-extn name
1202 * @dev: Device for which the prop-name has to be set.
1203 * @name: name to postfix to properties.
1204 *
1205 * This is required only for the V2 bindings, and it enables a platform to
1206 * specify the extn to be used for certain property names. The properties to
1207 * which the extension will apply are opp-microvolt and opp-microamp. The OPP
1208 * core will postfix the property name with -<name> while looking for them.
1209 *
1210 * Locking: The internal opp_table and opp structures are RCU protected.
1211 * Hence this function internally uses RCU updater strategy with mutex locks
1212 * to keep the integrity of the internal data structures. Callers should ensure
1213 * that this function is *NOT* called under RCU protection or in contexts where
1214 * mutex cannot be locked.
1215 */
1216int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
1217{
1218        struct opp_table *opp_table;
1219        int ret = 0;
1220
1221        /* Hold our table modification lock here */
1222        mutex_lock(&opp_table_lock);
1223
1224        opp_table = _add_opp_table(dev);
1225        if (!opp_table) {
1226                ret = -ENOMEM;
1227                goto unlock;
1228        }
1229
1230        /* Make sure there are no concurrent readers while updating opp_table */
1231        WARN_ON(!list_empty(&opp_table->opp_list));
1232
1233        /* Do we already have a prop-name associated with opp_table? */
1234        if (opp_table->prop_name) {
1235                dev_err(dev, "%s: Already have prop-name %s\n", __func__,
1236                        opp_table->prop_name);
1237                ret = -EBUSY;
1238                goto err;
1239        }
1240
1241        opp_table->prop_name = kstrdup(name, GFP_KERNEL);
1242        if (!opp_table->prop_name) {
1243                ret = -ENOMEM;
1244                goto err;
1245        }
1246
1247        mutex_unlock(&opp_table_lock);
1248        return 0;
1249
1250err:
1251        _remove_opp_table(opp_table);
1252unlock:
1253        mutex_unlock(&opp_table_lock);
1254
1255        return ret;
1256}
1257EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name);
1258
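/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): selecting a per-speed-bin property suffix before parsing the OPP
 * table, so that e.g. 'opp-microvolt-speed0' is looked up instead of
 * 'opp-microvolt'. The "speed0" name is hypothetical.
 *
 *	ret = dev_pm_opp_set_prop_name(dev, "speed0");
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_prop_name(dev);
 */
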
1259/**
1260 * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name
1261 * @dev: Device for which the prop-name has to be put.
1262 *
1263 * This is required only for the V2 bindings, and is called for a matching
1264 * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure
1265 * will not be freed.
1266 *
1267 * Locking: The internal opp_table and opp structures are RCU protected.
1268 * Hence this function internally uses RCU updater strategy with mutex locks
1269 * to keep the integrity of the internal data structures. Callers should ensure
1270 * that this function is *NOT* called under RCU protection or in contexts where
1271 * mutex cannot be locked.
1272 */
1273void dev_pm_opp_put_prop_name(struct device *dev)
1274{
1275        struct opp_table *opp_table;
1276
1277        /* Hold our table modification lock here */
1278        mutex_lock(&opp_table_lock);
1279
1280        /* Check for existing table for 'dev' first */
1281        opp_table = _find_opp_table(dev);
1282        if (IS_ERR(opp_table)) {
1283                dev_err(dev, "Failed to find opp_table: %ld\n",
1284                        PTR_ERR(opp_table));
1285                goto unlock;
1286        }
1287
1288        /* Make sure there are no concurrent readers while updating opp_table */
1289        WARN_ON(!list_empty(&opp_table->opp_list));
1290
1291        if (!opp_table->prop_name) {
1292                dev_err(dev, "%s: Doesn't have a prop-name\n", __func__);
1293                goto unlock;
1294        }
1295
1296        kfree(opp_table->prop_name);
1297        opp_table->prop_name = NULL;
1298
1299        /* Try freeing opp_table if this was the last blocking resource */
1300        _remove_opp_table(opp_table);
1301
1302unlock:
1303        mutex_unlock(&opp_table_lock);
1304}
1305EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name);
1306
1307/**
1308 * dev_pm_opp_set_regulator() - Set regulator name for the device
1309 * @dev: Device for which regulator name is being set.
1310 * @name: Name of the regulator.
1311 *
1312 * In order to support OPP switching, the OPP layer needs to know the name of the
1313 * device's regulator, as the core would be required to switch voltages as well.
1314 *
1315 * This must be called before any OPPs are initialized for the device.
1316 *
1317 * Locking: The internal opp_table and opp structures are RCU protected.
1318 * Hence this function internally uses RCU updater strategy with mutex locks
1319 * to keep the integrity of the internal data structures. Callers should ensure
1320 * that this function is *NOT* called under RCU protection or in contexts where
1321 * mutex cannot be locked.
1322 */
1323int dev_pm_opp_set_regulator(struct device *dev, const char *name)
1324{
1325        struct opp_table *opp_table;
1326        struct regulator *reg;
1327        int ret;
1328
1329        mutex_lock(&opp_table_lock);
1330
1331        opp_table = _add_opp_table(dev);
1332        if (!opp_table) {
1333                ret = -ENOMEM;
1334                goto unlock;
1335        }
1336
1337        /* This should be called before OPPs are initialized */
1338        if (WARN_ON(!list_empty(&opp_table->opp_list))) {
1339                ret = -EBUSY;
1340                goto err;
1341        }
1342
1343        /* Already have a regulator set */
1344        if (WARN_ON(!IS_ERR(opp_table->regulator))) {
1345                ret = -EBUSY;
1346                goto err;
1347        }
1348        /* Allocate the regulator */
1349        reg = regulator_get_optional(dev, name);
1350        if (IS_ERR(reg)) {
1351                ret = PTR_ERR(reg);
1352                if (ret != -EPROBE_DEFER)
1353                        dev_err(dev, "%s: no regulator (%s) found: %d\n",
1354                                __func__, name, ret);
1355                goto err;
1356        }
1357
1358        opp_table->regulator = reg;
1359
1360        mutex_unlock(&opp_table_lock);
1361        return 0;
1362
1363err:
1364        _remove_opp_table(opp_table);
1365unlock:
1366        mutex_unlock(&opp_table_lock);
1367
1368        return ret;
1369}
1370EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulator);
1371
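/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): registering the supply that dev_pm_opp_set_rate() should scale,
 * before any OPPs are added. The "vdd-cpu" supply name is hypothetical and
 * must match the device's regulator consumer name.
 *
 *	ret = dev_pm_opp_set_regulator(dev, "vdd-cpu");
 *	if (ret)
 *		return ret;
 *
 *	ret = dev_pm_opp_of_add_table(dev);
 *	if (ret)
 *		dev_pm_opp_put_regulator(dev);
 */
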
1372/**
1373 * dev_pm_opp_put_regulator() - Releases resources blocked for regulator
1374 * @dev: Device for which regulator was set.
1375 *
1376 * Locking: The internal opp_table and opp structures are RCU protected.
1377 * Hence this function internally uses RCU updater strategy with mutex locks
1378 * to keep the integrity of the internal data structures. Callers should ensure
1379 * that this function is *NOT* called under RCU protection or in contexts where
1380 * mutex cannot be locked.
1381 */
1382void dev_pm_opp_put_regulator(struct device *dev)
1383{
1384        struct opp_table *opp_table;
1385
1386        mutex_lock(&opp_table_lock);
1387
1388        /* Check for existing table for 'dev' first */
1389        opp_table = _find_opp_table(dev);
1390        if (IS_ERR(opp_table)) {
1391                dev_err(dev, "Failed to find opp_table: %ld\n",
1392                        PTR_ERR(opp_table));
1393                goto unlock;
1394        }
1395
1396        if (IS_ERR(opp_table->regulator)) {
1397                dev_err(dev, "%s: Doesn't have regulator set\n", __func__);
1398                goto unlock;
1399        }
1400
1401        /* Make sure there are no concurrent readers while updating opp_table */
1402        WARN_ON(!list_empty(&opp_table->opp_list));
1403
1404        regulator_put(opp_table->regulator);
1405        opp_table->regulator = ERR_PTR(-ENXIO);
1406
1407        /* Try freeing opp_table if this was the last blocking resource */
1408        _remove_opp_table(opp_table);
1409
1410unlock:
1411        mutex_unlock(&opp_table_lock);
1412}
1413EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1414
1415/**
1416 * dev_pm_opp_add()  - Add an OPP table from a table definition
1417 * @dev:        device for which we do this operation
1418 * @freq:       Frequency in Hz for this OPP
1419 * @u_volt:     Voltage in uVolts for this OPP
1420 *
1421 * This function adds an opp definition to the opp table and returns status.
1422 * The opp is made available by default and it can be controlled using
1423 * dev_pm_opp_enable/disable functions.
1424 *
1425 * Locking: The internal opp_table and opp structures are RCU protected.
1426 * Hence this function internally uses RCU updater strategy with mutex locks
1427 * to keep the integrity of the internal data structures. Callers should ensure
1428 * that this function is *NOT* called under RCU protection or in contexts where
1429 * mutex cannot be locked.
1430 *
1431 * Return:
1432 * 0            On success OR
1433 *              Duplicate OPPs (both freq and volt are same) and opp->available
1434 * -EEXIST      Freqs are the same but volts are different OR
1435 *              Duplicate OPPs (both freq and volt are same) and !opp->available
1436 * -ENOMEM      Memory allocation failure
1437 */
1438int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
1439{
1440        return _opp_add_v1(dev, freq, u_volt, true);
1441}
1442EXPORT_SYMBOL_GPL(dev_pm_opp_add);
1443
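/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): a platform without DT support registering a couple of OPPs
 * directly. The frequency (Hz) and voltage (uV) pairs are hypothetical.
 *
 *	ret = dev_pm_opp_add(dev, 500000000, 950000);
 *	if (!ret)
 *		ret = dev_pm_opp_add(dev, 1000000000, 1100000);
 *	if (ret)
 *		dev_err(dev, "failed to register OPPs: %d\n", ret);
 */
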
1444/**
1445 * _opp_set_availability() - helper to set the availability of an opp
1446 * @dev:                device for which we do this operation
1447 * @freq:               OPP frequency to modify availability
1448 * @availability_req:   availability status requested for this opp
1449 *
1450 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
1451 * share the common logic which is isolated here.
1452 *
1453 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1454 * copy operation, returns 0 if no modification was done OR modification was
1455 * successful.
1456 *
1457 * Locking: The internal opp_table and opp structures are RCU protected.
1458 * Hence this function internally uses RCU updater strategy with mutex locks to
1459 * keep the integrity of the internal data structures. Callers should ensure
1460 * that this function is *NOT* called under RCU protection or in contexts where
1461 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1462 */
1463static int _opp_set_availability(struct device *dev, unsigned long freq,
1464                                 bool availability_req)
1465{
1466        struct opp_table *opp_table;
1467        struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
1468        int r = 0;
1469
1470        /* keep the node allocated */
1471        new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
1472        if (!new_opp)
1473                return -ENOMEM;
1474
1475        mutex_lock(&opp_table_lock);
1476
1477        /* Find the opp_table */
1478        opp_table = _find_opp_table(dev);
1479        if (IS_ERR(opp_table)) {
1480                r = PTR_ERR(opp_table);
1481                dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
1482                goto unlock;
1483        }
1484
1485        /* Do we have the frequency? */
1486        list_for_each_entry(tmp_opp, &opp_table->opp_list, node) {
1487                if (tmp_opp->rate == freq) {
1488                        opp = tmp_opp;
1489                        break;
1490                }
1491        }
1492        if (IS_ERR(opp)) {
1493                r = PTR_ERR(opp);
1494                goto unlock;
1495        }
1496
1497        /* Is update really needed? */
1498        if (opp->available == availability_req)
1499                goto unlock;
1500        /* copy the old data over */
1501        *new_opp = *opp;
1502
1503        /* plug in new node */
1504        new_opp->available = availability_req;
1505
1506        list_replace_rcu(&opp->node, &new_opp->node);
1507        mutex_unlock(&opp_table_lock);
1508        call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu);
1509
1510        /* Notify the change of the OPP availability */
1511        if (availability_req)
1512                srcu_notifier_call_chain(&opp_table->srcu_head,
1513                                         OPP_EVENT_ENABLE, new_opp);
1514        else
1515                srcu_notifier_call_chain(&opp_table->srcu_head,
1516                                         OPP_EVENT_DISABLE, new_opp);
1517
1518        return 0;
1519
1520unlock:
1521        mutex_unlock(&opp_table_lock);
1522        kfree(new_opp);
1523        return r;
1524}
1525
1526/**
1527 * dev_pm_opp_enable() - Enable a specific OPP
1528 * @dev:        device for which we do this operation
1529 * @freq:       OPP frequency to enable
1530 *
1531 * Enables a provided opp. If the operation is valid, this returns 0, else the
1532 * corresponding error value. It is meant to let users make an OPP available
1533 * again after it was temporarily made unavailable with dev_pm_opp_disable.
1534 *
1535 * Locking: The internal opp_table and opp structures are RCU protected.
1536 * Hence this function indirectly uses RCU and mutex locks to keep the
1537 * integrity of the internal data structures. Callers should ensure that
1538 * this function is *NOT* called under RCU protection or in contexts where
1539 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1540 *
1541 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1542 * copy operation, returns 0 if no modification was done OR modification was
1543 * successful.
1544 */
1545int dev_pm_opp_enable(struct device *dev, unsigned long freq)
1546{
1547        return _opp_set_availability(dev, freq, true);
1548}
1549EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
1550
1551/**
1552 * dev_pm_opp_disable() - Disable a specific OPP
1553 * @dev:        device for which we do this operation
1554 * @freq:       OPP frequency to disable
1555 *
1556 * Disables a provided opp. If the operation is valid, this returns
1557 * 0, else the corresponding error value. It is meant to be a temporary
1558 * control by users to make this OPP not available until the circumstances are
1559 * right to make it available again (with a call to dev_pm_opp_enable).
1560 *
1561 * Locking: The internal opp_table and opp structures are RCU protected.
1562 * Hence this function indirectly uses RCU and mutex locks to keep the
1563 * integrity of the internal data structures. Callers should ensure that
1564 * this function is *NOT* called under RCU protection or in contexts where
1565 * mutex locking or synchronize_rcu() blocking calls cannot be used.
1566 *
1567 * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
1568 * copy operation, returns 0 if no modification was done OR modification was
1569 * successful.
1570 */
1571int dev_pm_opp_disable(struct device *dev, unsigned long freq)
1572{
1573        return _opp_set_availability(dev, freq, false);
1574}
1575EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
1576
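/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * file): temporarily removing a turbo frequency from the table when the
 * device runs hot, and restoring it once it has cooled down. The 1.5 GHz
 * value is hypothetical.
 *
 *	dev_pm_opp_disable(dev, 1500000000);	(on the thermal trip)
 *	...
 *	dev_pm_opp_enable(dev, 1500000000);	(after cooling down)
 *
 * Both calls take opp_table_lock and may block, so they must not be made
 * from atomic context or under rcu_read_lock().
 */
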
1577/**
1578 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
1579 * @dev:        device pointer used to lookup OPP table.
1580 *
1581 * Return: pointer to notifier head if found, otherwise -ENODEV or
1582 * -EINVAL based on type of error cast as a pointer. The value must be checked
1583 * with IS_ERR to determine whether it is a valid pointer or an error result.
1584 *
1585 * Locking: This function must be called under rcu_read_lock(). opp_table is a
1586 * RCU protected pointer. The reason for this is that the notifier head which
1587 * is returned will remain valid only while under the locked area. The pointer
1588 * returned must be used prior to unlocking with rcu_read_unlock() to maintain
1589 * the integrity of the pointer.
1590 */
1591struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
1592{
1593        struct opp_table *opp_table = _find_opp_table(dev);
1594
1595        if (IS_ERR(opp_table))
1596                return ERR_CAST(opp_table); /* matching type */
1597
1598        return &opp_table->srcu_head;
1599}
1600EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
1601
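/*
 * Illustrative usage sketch (editorial addition), modelled on how existing
 * consumers use this helper: the notifier head is looked up under
 * rcu_read_lock(), while the (possibly sleeping) chain registration happens
 * outside of it. 'my_nb' is a hypothetical, already initialised
 * notifier_block.
 *
 *	struct srcu_notifier_head *nh;
 *	int ret = 0;
 *
 *	rcu_read_lock();
 *	nh = dev_pm_opp_get_notifier(dev);
 *	if (IS_ERR(nh))
 *		ret = PTR_ERR(nh);
 *	rcu_read_unlock();
 *
 *	if (!ret)
 *		ret = srcu_notifier_chain_register(nh, &my_nb);
 */
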
1602/*
1603 * Free the OPPs created using static entries present in DT and, if remove_all
1604 * is set, the dynamically added entries as well.
1605 */
1606void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
1607{
1608        struct opp_table *opp_table;
1609        struct dev_pm_opp *opp, *tmp;
1610
1611        /* Hold our table modification lock here */
1612        mutex_lock(&opp_table_lock);
1613
1614        /* Check for existing table for 'dev' */
1615        opp_table = _find_opp_table(dev);
1616        if (IS_ERR(opp_table)) {
1617                int error = PTR_ERR(opp_table);
1618
1619                if (error != -ENODEV)
1620                        WARN(1, "%s: opp_table: %d\n",
1621                             IS_ERR_OR_NULL(dev) ?
1622                                        "Invalid device" : dev_name(dev),
1623                             error);
1624                goto unlock;
1625        }
1626
1627        /* Find if opp_table manages a single device */
1628        if (list_is_singular(&opp_table->dev_list)) {
1629                /* Free static OPPs */
1630                list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
1631                        if (remove_all || !opp->dynamic)
1632                                _opp_remove(opp_table, opp, true);
1633                }
1634        } else {
1635                _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table);
1636        }
1637
1638unlock:
1639        mutex_unlock(&opp_table_lock);
1640}
1641
1642/**
1643 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
1644 * @dev:        device pointer used to lookup OPP table.
1645 *
1646 * Free both OPPs created using static entries present in DT and the
1647 * dynamically added entries.
1648 *
1649 * Locking: The internal opp_table and opp structures are RCU protected.
1650 * Hence this function indirectly uses RCU updater strategy with mutex locks
1651 * to keep the integrity of the internal data structures. Callers should ensure
1652 * that this function is *NOT* called under RCU protection or in contexts where
1653 * mutex cannot be locked.
1654 */
1655void dev_pm_opp_remove_table(struct device *dev)
1656{
1657        _dev_pm_opp_remove_table(dev, true);
1658}
1659EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
1660