// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clk/clk-conf.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>

#include "clk.h"

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static struct task_struct *prepare_owner;
static struct task_struct *enable_owner;

static int prepare_refcnt;
static int enable_refcnt;

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

static struct hlist_head *all_lists[] = {
        &clk_root_list,
        &clk_orphan_list,
        NULL,
};

/***    private data structures    ***/

struct clk_parent_map {
        const struct clk_hw     *hw;
        struct clk_core         *core;
        const char              *fw_name;
        const char              *name;
        int                     index;
};

struct clk_core {
        const char              *name;
        const struct clk_ops    *ops;
        struct clk_hw           *hw;
        struct module           *owner;
        struct device           *dev;
        struct device_node      *of_node;
        struct clk_core         *parent;
        struct clk_parent_map   *parents;
        u8                      num_parents;
        u8                      new_parent_index;
        unsigned long           rate;
        unsigned long           req_rate;
        unsigned long           new_rate;
        struct clk_core         *new_parent;
        struct clk_core         *new_child;
        unsigned long           flags;
        bool                    orphan;
        bool                    rpm_enabled;
        unsigned int            enable_count;
        unsigned int            prepare_count;
        unsigned int            protect_count;
        unsigned long           min_rate;
        unsigned long           max_rate;
        unsigned long           accuracy;
        int                     phase;
        struct clk_duty         duty;
        struct hlist_head       children;
        struct hlist_node       child_node;
        struct hlist_head       clks;
        unsigned int            notifier_count;
#ifdef CONFIG_DEBUG_FS
        struct dentry           *dentry;
        struct hlist_node       debug_node;
#endif
        struct kref             ref;
};

#define CREATE_TRACE_POINTS
#include <trace/events/clk.h>

struct clk {
        struct clk_core *core;
        struct device *dev;
        const char *dev_id;
        const char *con_id;
        unsigned long min_rate;
        unsigned long max_rate;
        unsigned int exclusive_count;
        struct hlist_node clks_node;
};

/***           runtime pm          ***/
static int clk_pm_runtime_get(struct clk_core *core)
{
        int ret;

        if (!core->rpm_enabled)
                return 0;

        ret = pm_runtime_get_sync(core->dev);
        return ret < 0 ? ret : 0;
}

static void clk_pm_runtime_put(struct clk_core *core)
{
        if (!core->rpm_enabled)
                return;

        pm_runtime_put_sync(core->dev);
}

/***           locking             ***/
static void clk_prepare_lock(void)
{
        if (!mutex_trylock(&prepare_lock)) {
                if (prepare_owner == current) {
                        prepare_refcnt++;
                        return;
                }
                mutex_lock(&prepare_lock);
        }
        WARN_ON_ONCE(prepare_owner != NULL);
        WARN_ON_ONCE(prepare_refcnt != 0);
        prepare_owner = current;
        prepare_refcnt = 1;
}

static void clk_prepare_unlock(void)
{
        WARN_ON_ONCE(prepare_owner != current);
        WARN_ON_ONCE(prepare_refcnt == 0);

        if (--prepare_refcnt)
                return;
        prepare_owner = NULL;
        mutex_unlock(&prepare_lock);
}

static unsigned long clk_enable_lock(void)
        __acquires(enable_lock)
{
        unsigned long flags;

        /*
         * On UP systems, spin_trylock_irqsave() always returns true, even if
         * we already hold the lock. So, in that case, we rely only on
         * reference counting.
         */
        if (!IS_ENABLED(CONFIG_SMP) ||
            !spin_trylock_irqsave(&enable_lock, flags)) {
                if (enable_owner == current) {
                        enable_refcnt++;
                        __acquire(enable_lock);
                        if (!IS_ENABLED(CONFIG_SMP))
                                local_save_flags(flags);
                        return flags;
                }
                spin_lock_irqsave(&enable_lock, flags);
        }
        WARN_ON_ONCE(enable_owner != NULL);
        WARN_ON_ONCE(enable_refcnt != 0);
        enable_owner = current;
        enable_refcnt = 1;
        return flags;
}

static void clk_enable_unlock(unsigned long flags)
        __releases(enable_lock)
{
        WARN_ON_ONCE(enable_owner != current);
        WARN_ON_ONCE(enable_refcnt == 0);

        if (--enable_refcnt) {
                __release(enable_lock);
                return;
        }
        enable_owner = NULL;
        spin_unlock_irqrestore(&enable_lock, flags);
}
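
/*
 * A minimal sketch (editorial, not part of the original file) of why these
 * lock helpers are reentrant: a clk operation that already holds the prepare
 * lock may call back into another clk API that takes it again. The owner and
 * refcount tracking above turns the nested acquisition into a counter bump
 * instead of a deadlock:
 *
 *	clk_prepare_lock();	// owner = current, refcnt = 1
 *	clk_prepare_lock();	// same task: refcnt = 2, no deadlock
 *	clk_prepare_unlock();	// refcnt = 1, mutex still held
 *	clk_prepare_unlock();	// refcnt = 0, mutex released
 */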

static bool clk_core_rate_is_protected(struct clk_core *core)
{
        return core->protect_count;
}

static bool clk_core_is_prepared(struct clk_core *core)
{
        bool ret = false;

        /*
         * .is_prepared is optional for clocks that can prepare;
         * fall back to the software usage counter if it is missing.
         */
        if (!core->ops->is_prepared)
                return core->prepare_count;

        if (!clk_pm_runtime_get(core)) {
                ret = core->ops->is_prepared(core->hw);
                clk_pm_runtime_put(core);
        }

        return ret;
}

static bool clk_core_is_enabled(struct clk_core *core)
{
        bool ret = false;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing.
         */
        if (!core->ops->is_enabled)
                return core->enable_count;

        /*
         * Check if the clock controller's device is runtime active before
         * calling the .is_enabled callback. If not, assume that the clock is
         * disabled, because we might be called from atomic context, from
         * which pm_runtime_get() is not allowed.
         * This function is called mainly from clk_disable_unused_subtree,
         * which ensures proper runtime pm activation of the controller before
         * taking the enable spinlock, but the check below is needed if one
         * tries to call it from other places.
         */
        if (core->rpm_enabled) {
                pm_runtime_get_noresume(core->dev);
                if (!pm_runtime_active(core->dev)) {
                        ret = false;
                        goto done;
                }
        }

        ret = core->ops->is_enabled(core->hw);
done:
        if (core->rpm_enabled)
                pm_runtime_put(core->dev);

        return ret;
}

/***    helper functions   ***/

const char *__clk_get_name(const struct clk *clk)
{
        return !clk ? NULL : clk->core->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

const char *clk_hw_get_name(const struct clk_hw *hw)
{
        return hw->core->name;
}
EXPORT_SYMBOL_GPL(clk_hw_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->core->hw;
}
EXPORT_SYMBOL_GPL(__clk_get_hw);

unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
{
        return hw->core->num_parents;
}
EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);

struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
{
        return hw->core->parent ? hw->core->parent->hw : NULL;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent);

static struct clk_core *__clk_lookup_subtree(const char *name,
                                             struct clk_core *core)
{
        struct clk_core *child;
        struct clk_core *ret;

        if (!strcmp(core->name, name))
                return core;

        hlist_for_each_entry(child, &core->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

static struct clk_core *clk_core_lookup(const char *name)
{
        struct clk_core *root_clk;
        struct clk_core *ret;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

#ifdef CONFIG_OF
static int of_parse_clkspec(const struct device_node *np, int index,
                            const char *name, struct of_phandle_args *out_args);
static struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
#else
static inline int of_parse_clkspec(const struct device_node *np, int index,
                                   const char *name,
                                   struct of_phandle_args *out_args)
{
        return -ENOENT;
}
static inline struct clk_hw *
of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
{
        return ERR_PTR(-ENOENT);
}
#endif

/**
 * clk_core_get - Find the clk_core parent of a clk
 * @core: clk to find parent of
 * @p_index: parent index to search for
 *
 * This is the preferred method for clk providers to find the parent of a
 * clk when that parent is external to the clk controller. The parent_names
 * array is indexed and treated as a local name matching a string in the device
 * node's 'clock-names' property or as the 'con_id' matching the device's
 * dev_name() in a clk_lookup. This allows clk providers to use their own
 * namespace instead of looking for a globally unique parent string.
 *
 * For example the following DT snippet would allow a clock registered by the
 * clock-controller@c001 that has a clk_init_data::parent_data array
 * with 'xtal' in the 'name' member to find the clock provided by the
 * clock-controller@f00abcd without needing to get the globally unique name of
 * the xtal clk.
 *
 *      parent: clock-controller@f00abcd {
 *              reg = <0xf00abcd 0xabcd>;
 *              #clock-cells = <0>;
 *      };
 *
 *      clock-controller@c001 {
 *              reg = <0xc001 0xf00d>;
 *              clocks = <&parent>;
 *              clock-names = "xtal";
 *              #clock-cells = <1>;
 *      };
 *
 * Returns: -ENOENT when the provider can't be found or the clk doesn't
 * exist in the provider or the name can't be found in the DT node or
 * in a clkdev lookup. NULL when the provider knows about the clk but it
 * isn't provided on this system.
 * A valid clk_core pointer when the clk can be found in the provider.
 */
static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
{
        const char *name = core->parents[p_index].fw_name;
        int index = core->parents[p_index].index;
        struct clk_hw *hw = ERR_PTR(-ENOENT);
        struct device *dev = core->dev;
        const char *dev_id = dev ? dev_name(dev) : NULL;
        struct device_node *np = core->of_node;
        struct of_phandle_args clkspec;

        if (np && (name || index >= 0) &&
            !of_parse_clkspec(np, index, name, &clkspec)) {
                hw = of_clk_get_hw_from_clkspec(&clkspec);
                of_node_put(clkspec.np);
        } else if (name) {
                /*
                 * If the DT search above couldn't find the provider, fall
                 * back to looking it up via clkdev-based clk_lookups.
                 */
                hw = clk_find_hw(dev_id, name);
        }

        if (IS_ERR(hw))
                return ERR_CAST(hw);

        return hw->core;
}
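
/*
 * An illustrative sketch (editorial, not part of the original file) of the
 * provider-side data that makes the lookup above work. A driver registering
 * a clk under the clock-controller@c001 node from the example could describe
 * its parent purely by local name and/or DT index; the clk name and ops below
 * are hypothetical:
 *
 *	static const struct clk_parent_data xtal_parent[] = {
 *		{ .fw_name = "xtal", .index = 0 },
 *	};
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_clk",		// hypothetical name
 *		.ops = &my_clk_ops,		// hypothetical ops
 *		.parent_data = xtal_parent,
 *		.num_parents = ARRAY_SIZE(xtal_parent),
 *	};
 *
 * clk_core_get() then resolves "xtal" via the node's clock-names or, failing
 * that, via a clkdev lookup against dev_name(dev).
 */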

static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
        struct clk_parent_map *entry = &core->parents[index];
        struct clk_core *parent = ERR_PTR(-ENOENT);

        if (entry->hw) {
                parent = entry->hw->core;
                /*
                 * We have a direct reference but it isn't registered yet?
                 * Orphan it and let clk_reparent() update the orphan status
                 * when the parent is registered.
                 */
                if (!parent)
                        parent = ERR_PTR(-EPROBE_DEFER);
        } else {
                parent = clk_core_get(core, index);
                if (PTR_ERR(parent) == -ENOENT && entry->name)
                        parent = clk_core_lookup(entry->name);
        }

        /* Only cache it if it's not an error */
        if (!IS_ERR(parent))
                entry->core = parent;
}

static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
                                                         u8 index)
{
        if (!core || index >= core->num_parents || !core->parents)
                return NULL;

        if (!core->parents[index].core)
                clk_core_fill_parent_index(core, index);

        return core->parents[index].core;
}

struct clk_hw *
clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
{
        struct clk_core *parent;

        parent = clk_core_get_parent_by_index(hw->core, index);

        return !parent ? NULL : parent->hw;
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);

unsigned int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? 0 : clk->core->enable_count;
}

static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
{
        if (!core)
                return 0;

        if (!core->num_parents || core->parent)
                return core->rate;

        /*
         * Clk must have a parent because num_parents > 0 but the parent isn't
         * known yet. Best to return 0 as the rate of this clk until we can
         * properly recalc the rate based on the parent's rate.
         */
        return 0;
}

unsigned long clk_hw_get_rate(const struct clk_hw *hw)
{
        return clk_core_get_rate_nolock(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_get_rate);

static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
{
        if (!core)
                return 0;

        return core->accuracy;
}

unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? 0 : clk->core->flags;
}
EXPORT_SYMBOL_GPL(__clk_get_flags);

unsigned long clk_hw_get_flags(const struct clk_hw *hw)
{
        return hw->core->flags;
}
EXPORT_SYMBOL_GPL(clk_hw_get_flags);

bool clk_hw_is_prepared(const struct clk_hw *hw)
{
        return clk_core_is_prepared(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_prepared);

bool clk_hw_rate_is_protected(const struct clk_hw *hw)
{
        return clk_core_rate_is_protected(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);

bool clk_hw_is_enabled(const struct clk_hw *hw)
{
        return clk_core_is_enabled(hw->core);
}
EXPORT_SYMBOL_GPL(clk_hw_is_enabled);

bool __clk_is_enabled(struct clk *clk)
{
        if (!clk)
                return false;

        return clk_core_is_enabled(clk->core);
}
EXPORT_SYMBOL_GPL(__clk_is_enabled);

static bool mux_is_better_rate(unsigned long rate, unsigned long now,
                           unsigned long best, unsigned long flags)
{
        if (flags & CLK_MUX_ROUND_CLOSEST)
                return abs(now - rate) < abs(best - rate);

        return now <= rate && now > best;
}

int clk_mux_determine_rate_flags(struct clk_hw *hw,
                                 struct clk_rate_request *req,
                                 unsigned long flags)
{
        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents, ret;
        unsigned long best = 0;
        struct clk_rate_request parent_req = *req;

        /* if NO_REPARENT flag set, pass through to current parent */
        if (core->flags & CLK_SET_RATE_NO_REPARENT) {
                parent = core->parent;
                if (core->flags & CLK_SET_RATE_PARENT) {
                        ret = __clk_determine_rate(parent ? parent->hw : NULL,
                                                   &parent_req);
                        if (ret)
                                return ret;

                        best = parent_req.rate;
                } else if (parent) {
                        best = clk_core_get_rate_nolock(parent);
                } else {
                        best = clk_core_get_rate_nolock(core);
                }

                goto out;
        }

        /* find the parent that can provide the fastest rate <= rate */
        num_parents = core->num_parents;
        for (i = 0; i < num_parents; i++) {
                parent = clk_core_get_parent_by_index(core, i);
                if (!parent)
                        continue;

                if (core->flags & CLK_SET_RATE_PARENT) {
                        parent_req = *req;
                        ret = __clk_determine_rate(parent->hw, &parent_req);
                        if (ret)
                                continue;
                } else {
                        parent_req.rate = clk_core_get_rate_nolock(parent);
                }

                if (mux_is_better_rate(req->rate, parent_req.rate,
                                       best, flags)) {
                        best_parent = parent;
                        best = parent_req.rate;
                }
        }

        if (!best_parent)
                return -EINVAL;

out:
        if (best_parent)
                req->best_parent_hw = best_parent->hw;
        req->best_parent_rate = best;
        req->rate = best;

        return 0;
}
EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);

struct clk *__clk_lookup(const char *name)
{
        struct clk_core *core = clk_core_lookup(name);

        return !core ? NULL : core->hw->clk;
}

static void clk_core_get_boundaries(struct clk_core *core,
                                    unsigned long *min_rate,
                                    unsigned long *max_rate)
{
        struct clk *clk_user;

        lockdep_assert_held(&prepare_lock);

        *min_rate = core->min_rate;
        *max_rate = core->max_rate;

        hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *min_rate = max(*min_rate, clk_user->min_rate);

        hlist_for_each_entry(clk_user, &core->clks, clks_node)
                *max_rate = min(*max_rate, clk_user->max_rate);
}

void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
                           unsigned long max_rate)
{
        hw->core->min_rate = min_rate;
        hw->core->max_rate = max_rate;
}
EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
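/*
 * A brief provider-side sketch (editorial assumption, not in the original
 * file): a driver that knows its PLL is only stable within a window can clamp
 * it at registration time; clk_core_get_boundaries() above then folds this
 * into every rate request. The limits are hypothetical:
 *
 *	clk_hw_set_rate_range(hw, 600000000, 1600000000);
 */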

/*
 * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
 * @hw: mux type clk to determine rate on
 * @req: rate request, also used to return preferred parent and frequencies
 *
 * Helper for finding the best parent to provide a given frequency. This can
 * be used directly as a determine_rate callback (e.g. for a mux), or from a
 * more complex clock that may combine a mux with other operations.
 *
 * Returns: 0 on success, a negative errno on error
 */
int __clk_mux_determine_rate(struct clk_hw *hw,
                             struct clk_rate_request *req)
{
        return clk_mux_determine_rate_flags(hw, req, 0);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);

int __clk_mux_determine_rate_closest(struct clk_hw *hw,
                                     struct clk_rate_request *req)
{
        return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
}
EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
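
/*
 * A minimal sketch (editorial, not from this file) of how a mux provider
 * typically wires these helpers into its clk_ops; the ops structure and the
 * get/set_parent callbacks are hypothetical:
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.get_parent	= my_mux_get_parent,	// hypothetical
 *		.set_parent	= my_mux_set_parent,	// hypothetical
 *		.determine_rate	= __clk_mux_determine_rate,
 *	};
 *
 * Muxes that prefer the closest match over "fastest rate <= requested" use
 * __clk_mux_determine_rate_closest instead.
 */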

/***        clk api        ***/

static void clk_core_rate_unprotect(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (WARN(core->protect_count == 0,
            "%s already unprotected\n", core->name))
                return;

        if (--core->protect_count > 0)
                return;

        clk_core_rate_unprotect(core->parent);
}

static int clk_core_rate_nuke_protect(struct clk_core *core)
{
        int ret;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return -EINVAL;

        if (core->protect_count == 0)
                return 0;

        ret = core->protect_count;
        core->protect_count = 1;
        clk_core_rate_unprotect(core);

        return ret;
}

/**
 * clk_rate_exclusive_put - release exclusivity over clock rate control
 * @clk: the clk over which the exclusivity is released
 *
 * clk_rate_exclusive_put() completes a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_put() must be balanced with calls to
 * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
 * error status.
 */
void clk_rate_exclusive_put(struct clk *clk)
{
        if (!clk)
                return;

        clk_prepare_lock();

        /*
         * if there is something wrong with this consumer's protect count, stop
         * here before messing with the provider
         */
        if (WARN_ON(clk->exclusive_count <= 0))
                goto out;

        clk_core_rate_unprotect(clk->core);
        clk->exclusive_count--;
out:
        clk_prepare_unlock();
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);

static void clk_core_rate_protect(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (core->protect_count == 0)
                clk_core_rate_protect(core->parent);

        core->protect_count++;
}

static void clk_core_rate_restore_protect(struct clk_core *core, int count)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (count == 0)
                return;

        clk_core_rate_protect(core);
        core->protect_count = count;
}

/**
 * clk_rate_exclusive_get - get exclusivity over the clk rate control
 * @clk: the clk over which the exclusivity of rate control is requested
 *
 * clk_rate_exclusive_get() begins a critical section during which a clock
 * consumer cannot tolerate any other consumer making any operation on the
 * clock which could result in a rate change or rate glitch. Exclusive clocks
 * cannot have their rate changed, either directly or indirectly due to changes
 * further up the parent chain of clocks. As a result, clocks up the parent
 * chain also get under exclusive control of the calling consumer.
 *
 * If exclusivity is claimed more than once on a clock, even by the same
 * consumer, the rate effectively gets locked as exclusivity can't be
 * preempted.
 *
 * Calls to clk_rate_exclusive_get() should be balanced with calls to
 * clk_rate_exclusive_put(). Calls to this function may sleep.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_rate_exclusive_get(struct clk *clk)
{
        if (!clk)
                return 0;

        clk_prepare_lock();
        clk_core_rate_protect(clk->core);
        clk->exclusive_count++;
        clk_prepare_unlock();

        return 0;
}
EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
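
/*
 * A consumer-side sketch (editorial, illustrative only): claim exclusivity,
 * program a rate that must not glitch afterwards, then release it. The rate
 * value is hypothetical:
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_set_rate(clk, 100000000);
 *	...
 *	clk_rate_exclusive_put(clk);
 */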

static void clk_core_unprepare(struct clk_core *core)
{
        lockdep_assert_held(&prepare_lock);

        if (!core)
                return;

        if (WARN(core->prepare_count == 0,
            "%s already unprepared\n", core->name))
                return;

        if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
            "Unpreparing critical %s\n", core->name))
                return;

        if (core->flags & CLK_SET_RATE_GATE)
                clk_core_rate_unprotect(core);

        if (--core->prepare_count > 0)
                return;

        WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);

        trace_clk_unprepare(core);

        if (core->ops->unprepare)
                core->ops->unprepare(core->hw);

        clk_pm_runtime_put(core);

        trace_clk_unprepare_complete(core);
        clk_core_unprepare(core->parent);
}

static void clk_core_unprepare_lock(struct clk_core *core)
{
        clk_prepare_lock();
        clk_core_unprepare(core);
        clk_prepare_unlock();
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a slow
 * part.  It is for this reason that clk_unprepare and clk_disable are not
 * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return;

        clk_core_unprepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

static int clk_core_prepare(struct clk_core *core)
{
        int ret = 0;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return 0;

        if (core->prepare_count == 0) {
                ret = clk_pm_runtime_get(core);
                if (ret)
                        return ret;

                ret = clk_core_prepare(core->parent);
                if (ret)
                        goto runtime_put;

                trace_clk_prepare(core);

                if (core->ops->prepare)
                        ret = core->ops->prepare(core->hw);

                trace_clk_prepare_complete(core);

                if (ret)
                        goto unprepare;
        }

        core->prepare_count++;

        /*
         * CLK_SET_RATE_GATE is a special case of clock protection.
         * Instead of a consumer claiming exclusive rate control, it is
         * actually the provider which prevents any consumer from making any
         * operation which could result in a rate change or rate glitch while
         * the clock is prepared.
         */
        if (core->flags & CLK_SET_RATE_GATE)
                clk_core_rate_protect(core);

        return 0;
unprepare:
        clk_core_unprepare(core->parent);
runtime_put:
        clk_pm_runtime_put(core);
        return ret;
}

static int clk_core_prepare_lock(struct clk_core *core)
{
        int ret;

        clk_prepare_lock();
        ret = clk_core_prepare(core);
        clk_prepare_unlock();

        return ret;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
 * operation may sleep.  One example is a clk which is accessed over I2C.  In
 * the complex case a clk ungate operation may require a fast and a slow part.
 * It is for this reason that clk_prepare and clk_enable are not mutually
 * exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative errno otherwise.
 */
int clk_prepare(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk_core_prepare_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void clk_core_disable(struct clk_core *core)
{
        lockdep_assert_held(&enable_lock);

        if (!core)
                return;

        if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
                return;

        if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
            "Disabling critical %s\n", core->name))
                return;

        if (--core->enable_count > 0)
                return;

        trace_clk_disable_rcuidle(core);

        if (core->ops->disable)
                core->ops->disable(core->hw);

        trace_clk_disable_complete_rcuidle(core);

        clk_core_disable(core->parent);
}

static void clk_core_disable_lock(struct clk_core *core)
{
        unsigned long flags;

        flags = clk_enable_lock();
        clk_core_disable(core);
        clk_enable_unlock(flags);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        if (IS_ERR_OR_NULL(clk))
                return;

        clk_core_disable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int clk_core_enable(struct clk_core *core)
{
        int ret = 0;

        lockdep_assert_held(&enable_lock);

        if (!core)
                return 0;

        if (WARN(core->prepare_count == 0,
            "Enabling unprepared %s\n", core->name))
                return -ESHUTDOWN;

        if (core->enable_count == 0) {
                ret = clk_core_enable(core->parent);

                if (ret)
                        return ret;

                trace_clk_enable_rcuidle(core);

                if (core->ops->enable)
                        ret = core->ops->enable(core->hw);

                trace_clk_enable_complete_rcuidle(core);

                if (ret) {
                        clk_core_disable(core->parent);
                        return ret;
                }
        }

        core->enable_count++;
        return 0;
}

static int clk_core_enable_lock(struct clk_core *core)
{
        unsigned long flags;
        int ret;

        flags = clk_enable_lock();
        ret = clk_core_enable(core);
        clk_enable_unlock(flags);

        return ret;
}

/**
 * clk_gate_restore_context - restore context for poweroff
 * @hw: the clk_hw pointer of clock whose state is to be restored
 *
 * The clock gate restore context function enables or disables the gate
 * clock based on its enable_count. This is used in cases where the clock
 * context is lost and, depending on the enable_count, the clock needs to
 * be either enabled or disabled. It helps restore the state of gate clocks.
 */
void clk_gate_restore_context(struct clk_hw *hw)
{
        struct clk_core *core = hw->core;

        if (core->enable_count)
                core->ops->enable(hw);
        else
                core->ops->disable(hw);
}
EXPORT_SYMBOL_GPL(clk_gate_restore_context);
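
/*
 * A provider-side sketch (editorial assumption, not in the original file):
 * gate clocks whose only state is the enable bit can plug this helper
 * directly into their context-restore callback; the ops structure and the
 * enable/disable callbacks are hypothetical:
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable		 = my_gate_enable,	// hypothetical
 *		.disable	 = my_gate_disable,	// hypothetical
 *		.restore_context = clk_gate_restore_context,
 *	};
 */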

static int clk_core_save_context(struct clk_core *core)
{
        struct clk_core *child;
        int ret = 0;

        hlist_for_each_entry(child, &core->children, child_node) {
                ret = clk_core_save_context(child);
                if (ret < 0)
                        return ret;
        }

        if (core->ops && core->ops->save_context)
                ret = core->ops->save_context(core->hw);

        return ret;
}

static void clk_core_restore_context(struct clk_core *core)
{
        struct clk_core *child;

        if (core->ops && core->ops->restore_context)
                core->ops->restore_context(core->hw);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_core_restore_context(child);
}

/**
 * clk_save_context - save clock context for poweroff
 *
 * Saves the context of the clock registers for powerstates in which the
 * contents of the registers will be lost. Occurs deep within the suspend
 * code.  Returns 0 on success.
 */
int clk_save_context(void)
{
        struct clk_core *clk;
        int ret;

        hlist_for_each_entry(clk, &clk_root_list, child_node) {
                ret = clk_core_save_context(clk);
                if (ret < 0)
                        return ret;
        }

        hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
                ret = clk_core_save_context(clk);
                if (ret < 0)
                        return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(clk_save_context);

/**
 * clk_restore_context - restore clock context after poweroff
 *
 * Restore the saved clock context upon resume.
 */
void clk_restore_context(void)
{
        struct clk_core *core;

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_core_restore_context(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_core_restore_context(core);
}
EXPORT_SYMBOL_GPL(clk_restore_context);
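
/*
 * A platform-side sketch (editorial; hook names are hypothetical): a
 * power-management path that loses clock-controller register state brackets
 * the power-off with these two calls:
 *
 *	static int my_soc_suspend(void)		// hypothetical hook
 *	{
 *		return clk_save_context();
 *	}
 *
 *	static void my_soc_resume(void)		// hypothetical hook
 *	{
 *		clk_restore_context();
 *	}
 */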

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative errno
 * otherwise.
 */
int clk_enable(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk_core_enable_lock(clk->core);
}
EXPORT_SYMBOL_GPL(clk_enable);
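
/*
 * The canonical consumer pattern for the two-step gating model described
 * above (an editorial sketch): prepare in sleepable context first, then
 * enable, and tear down in the reverse order. Drivers commonly use the
 * clk_prepare_enable()/clk_disable_unprepare() helpers for this:
 *
 *	ret = clk_prepare(clk);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_enable(clk);		// may run in atomic context
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);
 *	clk_unprepare(clk);
 */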

static int clk_core_prepare_enable(struct clk_core *core)
{
        int ret;

        ret = clk_core_prepare_lock(core);
        if (ret)
                return ret;

        ret = clk_core_enable_lock(core);
        if (ret)
                clk_core_unprepare_lock(core);

        return ret;
}

static void clk_core_disable_unprepare(struct clk_core *core)
{
        clk_core_disable_lock(core);
        clk_core_unprepare_lock(core);
}

static void __init clk_unprepare_unused_subtree(struct clk_core *core)
{
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_unprepare_unused_subtree(child);

        if (core->prepare_count)
                return;

        if (core->flags & CLK_IGNORE_UNUSED)
                return;

        if (clk_pm_runtime_get(core))
                return;

        if (clk_core_is_prepared(core)) {
                trace_clk_unprepare(core);
                if (core->ops->unprepare_unused)
                        core->ops->unprepare_unused(core->hw);
                else if (core->ops->unprepare)
                        core->ops->unprepare(core->hw);
                trace_clk_unprepare_complete(core);
        }

        clk_pm_runtime_put(core);
}

static void __init clk_disable_unused_subtree(struct clk_core *core)
{
        struct clk_core *child;
        unsigned long flags;

        lockdep_assert_held(&prepare_lock);

        hlist_for_each_entry(child, &core->children, child_node)
                clk_disable_unused_subtree(child);

        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_prepare_enable(core->parent);

        if (clk_pm_runtime_get(core))
                goto unprepare_out;

        flags = clk_enable_lock();

        if (core->enable_count)
                goto unlock_out;

        if (core->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        /*
         * some gate clocks have special needs during the disable-unused
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
        if (clk_core_is_enabled(core)) {
                trace_clk_disable(core);
                if (core->ops->disable_unused)
                        core->ops->disable_unused(core->hw);
                else if (core->ops->disable)
                        core->ops->disable(core->hw);
                trace_clk_disable_complete(core);
        }

unlock_out:
        clk_enable_unlock(flags);
        clk_pm_runtime_put(core);
unprepare_out:
        if (core->flags & CLK_OPS_PARENT_ENABLE)
                clk_core_disable_unprepare(core->parent);
}

static bool clk_ignore_unused __initdata;
static int __init clk_ignore_unused_setup(char *__unused)
{
        clk_ignore_unused = true;
        return 1;
}
__setup("clk_ignore_unused", clk_ignore_unused_setup);

static int __init clk_disable_unused(void)
{
        struct clk_core *core;

        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
                return 0;
        }

        clk_prepare_lock();

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_disable_unused_subtree(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(core);

        hlist_for_each_entry(core, &clk_root_list, child_node)
                clk_unprepare_unused_subtree(core);

        hlist_for_each_entry(core, &clk_orphan_list, child_node)
                clk_unprepare_unused_subtree(core);

        clk_prepare_unlock();

        return 0;
}
late_initcall_sync(clk_disable_unused);

static int clk_core_determine_round_nolock(struct clk_core *core,
                                           struct clk_rate_request *req)
{
        long rate;

        lockdep_assert_held(&prepare_lock);

        if (!core)
                return 0;

        /*
         * At this point, core protection will be disabled either
         * - if the provider is not protected at all, or
         * - if the calling consumer is the only one which has exclusivity
         *   over the provider
         */
        if (clk_core_rate_is_protected(core)) {
                req->rate = core->rate;
        } else if (core->ops->determine_rate) {
                return core->ops->determine_rate(core->hw, req);
        } else if (core->ops->round_rate) {
                rate = core->ops->round_rate(core->hw, req->rate,
                                             &req->best_parent_rate);
                if (rate < 0)
                        return rate;

                req->rate = rate;
        } else {
                return -EINVAL;
        }

        return 0;
}

static void clk_core_init_rate_req(struct clk_core * const core,
                                   struct clk_rate_request *req)
{
        struct clk_core *parent;

        if (WARN_ON(!core || !req))
                return;

        parent = core->parent;
        if (parent) {
                req->best_parent_hw = parent->hw;
                req->best_parent_rate = parent->rate;
        } else {
                req->best_parent_hw = NULL;
                req->best_parent_rate = 0;
        }
}

static bool clk_core_can_round(struct clk_core * const core)
{
        return core->ops->determine_rate || core->ops->round_rate;
}

static int clk_core_round_rate_nolock(struct clk_core *core,
                                      struct clk_rate_request *req)
{
        lockdep_assert_held(&prepare_lock);

        if (!core) {
                req->rate = 0;
                return 0;
        }

        clk_core_init_rate_req(core, req);

        if (clk_core_can_round(core))
                return clk_core_determine_round_nolock(core, req);
        else if (core->flags & CLK_SET_RATE_PARENT)
                return clk_core_round_rate_nolock(core->parent, req);

        req->rate = core->rate;
        return 0;
}

/**
 * __clk_determine_rate - get the closest rate actually supported by a clock
 * @hw: determine the rate of this clock
 * @req: target rate request
 *
 * Useful for clk_ops such as .set_rate and .determine_rate.
 */
int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
        if (!hw) {
                req->rate = 0;
                return 0;
        }

        return clk_core_round_rate_nolock(hw->core, req);
}
EXPORT_SYMBOL_GPL(__clk_determine_rate);

unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
{
        int ret;
        struct clk_rate_request req;

        clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
        req.rate = rate;

        ret = clk_core_round_rate_nolock(hw->core, &req);
        if (ret)
                return 0;

        return req.rate;
}
EXPORT_SYMBOL_GPL(clk_hw_round_rate);

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the parent rate is returned.
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        struct clk_rate_request req;
        int ret;

        if (!clk)
                return 0;

        clk_prepare_lock();

        if (clk->exclusive_count)
                clk_core_rate_unprotect(clk->core);

        clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
        req.rate = rate;

        ret = clk_core_round_rate_nolock(clk->core, &req);

        if (clk->exclusive_count)
                clk_core_rate_protect(clk->core);

        clk_prepare_unlock();

        if (ret)
                return ret;

        return req.rate;
}
EXPORT_SYMBOL_GPL(clk_round_rate);
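
/*
 * A consumer-side sketch (editorial): probe what the clk can actually do
 * before committing to it. The 48 MHz target and the acceptance check are
 * hypothetical:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && is_acceptable(rounded))	// hypothetical check
 *		ret = clk_set_rate(clk, rounded);
 */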

/**
 * __clk_notify - call clk notifier chain
 * @core: clk that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk_core *core, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk->core == core) {
                        cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        if (ret & NOTIFY_STOP_MASK)
                                return ret;
                }
        }

        return ret;
}
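
/*
 * A hedged sketch of the consumer side of this chain (editorial; callback
 * and helper names are hypothetical): drivers subscribe with
 * clk_notifier_register() and receive the clk_notifier_data built above:
 *
 *	static int my_rate_cb(struct notifier_block *nb,
 *			      unsigned long action, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (action == POST_RATE_CHANGE)
 *			my_reprogram_dividers(cnd->new_rate);	// hypothetical
 *		return NOTIFY_OK;
 *	}
 *
 *	...
 *	clk_notifier_register(clk, &my_nb);	// my_nb.notifier_call = my_rate_cb
 */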

/**
 * __clk_recalc_accuracies
 * @core: first clk in the subtree
 *
 * Walks the subtree of clks starting with clk and recalculates accuracies as
 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 * callback then it is assumed that the clock will take on the accuracy of its
 * parent.
 */
static void __clk_recalc_accuracies(struct clk_core *core)
{
        unsigned long parent_accuracy = 0;
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        if (core->parent)
                parent_accuracy = core->parent->accuracy;

        if (core->ops->recalc_accuracy)
                core->accuracy = core->ops->recalc_accuracy(core->hw,
                                                          parent_accuracy);
        else
                core->accuracy = parent_accuracy;

        hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_accuracies(child);
}

static long clk_core_get_accuracy_recalc(struct clk_core *core)
{
        if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
                __clk_recalc_accuracies(core);

        return clk_core_get_accuracy_no_lock(core);
}

/**
 * clk_get_accuracy - return the accuracy of clk
 * @clk: the clk whose accuracy is being returned
 *
 * Simply returns the cached accuracy of the clk, unless the
 * CLK_GET_ACCURACY_NOCACHE flag is set, which means the accuracy will be
 * recalculated first.
 * If clk is NULL then returns 0.
 */
long clk_get_accuracy(struct clk *clk)
{
        long accuracy;

        if (!clk)
                return 0;

        clk_prepare_lock();
        accuracy = clk_core_get_accuracy_recalc(clk->core);
        clk_prepare_unlock();

        return accuracy;
}
EXPORT_SYMBOL_GPL(clk_get_accuracy);

static unsigned long clk_recalc(struct clk_core *core,
                                unsigned long parent_rate)
{
        unsigned long rate = parent_rate;

        if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
                rate = core->ops->recalc_rate(core->hw, parent_rate);
                clk_pm_runtime_put(core);
        }
        return rate;
}

/**
 * __clk_recalc_rates
 * @core: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 */
static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct clk_core *child;

        lockdep_assert_held(&prepare_lock);

        old_rate = core->rate;

        if (core->parent)
                parent_rate = core->parent->rate;

        core->rate = clk_recalc(core, parent_rate);

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (core->notifier_count && msg)
                __clk_notify(core, msg, old_rate, core->rate);

        hlist_for_each_entry(child, &core->children, child_node)
                __clk_recalc_rates(child, msg);
}

static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
{
        if (core && (core->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(core, 0);

        return clk_core_get_rate_nolock(core);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, which means a recalc_rate will be issued.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        if (!clk)
                return 0;

        clk_prepare_lock();
        rate = clk_core_get_rate_recalc(clk->core);
        clk_prepare_unlock();

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);
1632
1633static int clk_fetch_parent_index(struct clk_core *core,
1634                                  struct clk_core *parent)
1635{
1636        int i;
1637
1638        if (!parent)
1639                return -EINVAL;
1640
1641        for (i = 0; i < core->num_parents; i++) {
1642                /* Found it first try! */
1643                if (core->parents[i].core == parent)
1644                        return i;
1645
1646                /* Something else is here, so keep looking */
1647                if (core->parents[i].core)
1648                        continue;
1649
1650                /* Maybe core hasn't been cached but the hw is all we know? */
1651                if (core->parents[i].hw) {
1652                        if (core->parents[i].hw == parent->hw)
1653                                break;
1654
1655                        /* Didn't match, but we're expecting a clk_hw */
1656                        continue;
1657                }
1658
1659                /* Maybe it hasn't been cached (clk_set_parent() path) */
1660                if (parent == clk_core_get(core, i))
1661                        break;
1662
1663                /* Fallback to comparing globally unique names */
1664                if (core->parents[i].name &&
1665                    !strcmp(parent->name, core->parents[i].name))
1666                        break;
1667        }
1668
1669        if (i == core->num_parents)
1670                return -EINVAL;
1671
1672        core->parents[i].core = parent;
1673        return i;
1674}
1675
1676/**
1677 * clk_hw_get_parent_index - return the index of the parent clock
1678 * @hw: clk_hw associated with the clk being consumed
1679 *
1680 * Fetches and returns the index of parent clock. Returns -EINVAL if the given
1681 * clock does not have a current parent.
1682 */
1683int clk_hw_get_parent_index(struct clk_hw *hw)
1684{
1685        struct clk_hw *parent = clk_hw_get_parent(hw);
1686
1687        if (WARN_ON(parent == NULL))
1688                return -EINVAL;
1689
1690        return clk_fetch_parent_index(hw->core, parent->core);
1691}
1692EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
1693
1694/*
1695 * Update the orphan status of @core and all its children.
1696 */
1697static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1698{
1699        struct clk_core *child;
1700
1701        core->orphan = is_orphan;
1702
1703        hlist_for_each_entry(child, &core->children, child_node)
1704                clk_core_update_orphan_status(child, is_orphan);
1705}
1706
1707static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1708{
1709        bool was_orphan = core->orphan;
1710
1711        hlist_del(&core->child_node);
1712
1713        if (new_parent) {
1714                bool becomes_orphan = new_parent->orphan;
1715
1716                /* avoid duplicate POST_RATE_CHANGE notifications */
1717                if (new_parent->new_child == core)
1718                        new_parent->new_child = NULL;
1719
1720                hlist_add_head(&core->child_node, &new_parent->children);
1721
1722                if (was_orphan != becomes_orphan)
1723                        clk_core_update_orphan_status(core, becomes_orphan);
1724        } else {
1725                hlist_add_head(&core->child_node, &clk_orphan_list);
1726                if (!was_orphan)
1727                        clk_core_update_orphan_status(core, true);
1728        }
1729
1730        core->parent = new_parent;
1731}
1732
1733static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1734                                           struct clk_core *parent)
1735{
1736        unsigned long flags;
1737        struct clk_core *old_parent = core->parent;
1738
1739        /*
1740         * 1. Enable parents for CLK_OPS_PARENT_ENABLE clock
1741         *
1742         * 2. Migrate prepare state between parents and prevent race with
1743         * clk_enable().
1744         *
1745         * If the clock is not prepared, then a race with
1746         * clk_enable/disable() is impossible since we already have the
1747         * prepare lock (future calls to clk_enable() need to be preceded by
1748         * a clk_prepare()).
1749         *
1750         * If the clock is prepared, migrate the prepared state to the new
1751         * parent and also protect against a race with clk_enable() by
1752         * forcing the clock and the new parent on.  This ensures that all
1753         * future calls to clk_enable() are practically NOPs with respect to
1754         * hardware and software states.
1755         *
1756         * See also: Comment for clk_set_parent() below.
1757         */
1758
1759        /* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
1760        if (core->flags & CLK_OPS_PARENT_ENABLE) {
1761                clk_core_prepare_enable(old_parent);
1762                clk_core_prepare_enable(parent);
1763        }
1764
1765        /* migrate prepare count if > 0 */
1766        if (core->prepare_count) {
1767                clk_core_prepare_enable(parent);
1768                clk_core_enable_lock(core);
1769        }
1770
1771        /* update the clk tree topology */
1772        flags = clk_enable_lock();
1773        clk_reparent(core, parent);
1774        clk_enable_unlock(flags);
1775
1776        return old_parent;
1777}
1778
1779static void __clk_set_parent_after(struct clk_core *core,
1780                                   struct clk_core *parent,
1781                                   struct clk_core *old_parent)
1782{
1783        /*
1784         * Finish the migration of prepare state and undo the changes done
1785         * for preventing a race with clk_enable().
1786         */
1787        if (core->prepare_count) {
1788                clk_core_disable_lock(core);
1789                clk_core_disable_unprepare(old_parent);
1790        }
1791
1792        /* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
1793        if (core->flags & CLK_OPS_PARENT_ENABLE) {
1794                clk_core_disable_unprepare(parent);
1795                clk_core_disable_unprepare(old_parent);
1796        }
1797}
1798
1799static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1800                            u8 p_index)
1801{
1802        unsigned long flags;
1803        int ret = 0;
1804        struct clk_core *old_parent;
1805
1806        old_parent = __clk_set_parent_before(core, parent);
1807
1808        trace_clk_set_parent(core, parent);
1809
1810        /* change clock input source */
1811        if (parent && core->ops->set_parent)
1812                ret = core->ops->set_parent(core->hw, p_index);
1813
1814        trace_clk_set_parent_complete(core, parent);
1815
1816        if (ret) {
1817                flags = clk_enable_lock();
1818                clk_reparent(core, old_parent);
1819                clk_enable_unlock(flags);
1820                __clk_set_parent_after(core, old_parent, parent);
1821
1822                return ret;
1823        }
1824
1825        __clk_set_parent_after(core, parent, old_parent);
1826
1827        return 0;
1828}
1829
1830/**
1831 * __clk_speculate_rates - speculate rate changes down a clock subtree
1832 * @core: first clk in the subtree
1833 * @parent_rate: the "future" rate of clk's parent
1834 *
1835 * Walks the subtree of clks starting with clk, speculating rates as it
1836 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1837 *
1838 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1839 * pre-rate change notifications and returns early if no clks in the
1840 * subtree have subscribed to the notifications.  Note that if a clk does not
1841 * implement the .recalc_rate callback then it is assumed that the clock will
1842 * take on the rate of its parent.
1843 */
1844static int __clk_speculate_rates(struct clk_core *core,
1845                                 unsigned long parent_rate)
1846{
1847        struct clk_core *child;
1848        unsigned long new_rate;
1849        int ret = NOTIFY_DONE;
1850
1851        lockdep_assert_held(&prepare_lock);
1852
1853        new_rate = clk_recalc(core, parent_rate);
1854
1855        /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1856        if (core->notifier_count)
1857                ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1858
1859        if (ret & NOTIFY_STOP_MASK) {
1860                pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1861                                __func__, core->name, ret);
1862                goto out;
1863        }
1864
1865        hlist_for_each_entry(child, &core->children, child_node) {
1866                ret = __clk_speculate_rates(child, new_rate);
1867                if (ret & NOTIFY_STOP_MASK)
1868                        break;
1869        }
1870
1871out:
1872        return ret;
1873}
1874
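/*
 * Editor's illustration, not part of the kernel source: the PRE_RATE_CHANGE
 * notifications speculated above can be vetoed by a consumer. A hypothetical
 * callback, registered with clk_notifier_register(), that rejects rates above
 * an arbitrary 100 MHz limit could look like this:
 */
static int __maybe_unused example_rate_limit_notifier(struct notifier_block *nb,
                                                      unsigned long event,
                                                      void *data)
{
        struct clk_notifier_data *cnd = data;

        /* NOTIFY_BAD sets NOTIFY_STOP_MASK and aborts the rate change */
        if (event == PRE_RATE_CHANGE && cnd->new_rate > 100000000UL)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}
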
1875static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1876                             struct clk_core *new_parent, u8 p_index)
1877{
1878        struct clk_core *child;
1879
1880        core->new_rate = new_rate;
1881        core->new_parent = new_parent;
1882        core->new_parent_index = p_index;
1883        /* include clk in new parent's PRE_RATE_CHANGE notifications */
1884        core->new_child = NULL;
1885        if (new_parent && new_parent != core->parent)
1886                new_parent->new_child = core;
1887
1888        hlist_for_each_entry(child, &core->children, child_node) {
1889                child->new_rate = clk_recalc(child, new_rate);
1890                clk_calc_subtree(child, child->new_rate, NULL, 0);
1891        }
1892}
1893
1894/*
1895 * Calculate the new rates, returning the topmost clock that has to be
1896 * changed.
1897 */
1898static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1899                                           unsigned long rate)
1900{
1901        struct clk_core *top = core;
1902        struct clk_core *old_parent, *parent;
1903        unsigned long best_parent_rate = 0;
1904        unsigned long new_rate;
1905        unsigned long min_rate;
1906        unsigned long max_rate;
1907        int p_index = 0;
1908        long ret;
1909
1910        /* sanity */
1911        if (IS_ERR_OR_NULL(core))
1912                return NULL;
1913
1914        /* save parent rate, if it exists */
1915        parent = old_parent = core->parent;
1916        if (parent)
1917                best_parent_rate = parent->rate;
1918
1919        clk_core_get_boundaries(core, &min_rate, &max_rate);
1920
1921        /* find the closest rate and parent clk/rate */
1922        if (clk_core_can_round(core)) {
1923                struct clk_rate_request req;
1924
1925                req.rate = rate;
1926                req.min_rate = min_rate;
1927                req.max_rate = max_rate;
1928
1929                clk_core_init_rate_req(core, &req);
1930
1931                ret = clk_core_determine_round_nolock(core, &req);
1932                if (ret < 0)
1933                        return NULL;
1934
1935                best_parent_rate = req.best_parent_rate;
1936                new_rate = req.rate;
1937                parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1938
1939                if (new_rate < min_rate || new_rate > max_rate)
1940                        return NULL;
1941        } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1942                /* pass-through clock without adjustable parent */
1943                core->new_rate = core->rate;
1944                return NULL;
1945        } else {
1946                /* pass-through clock with adjustable parent */
1947                top = clk_calc_new_rates(parent, rate);
1948                new_rate = parent->new_rate;
1949                goto out;
1950        }
1951
1952        /* some clocks must be gated to change parent */
1953        if (parent != old_parent &&
1954            (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1955                pr_debug("%s: %s not gated but wants to reparent\n",
1956                         __func__, core->name);
1957                return NULL;
1958        }
1959
1960        /* try finding the new parent index */
1961        if (parent && core->num_parents > 1) {
1962                p_index = clk_fetch_parent_index(core, parent);
1963                if (p_index < 0) {
1964                        pr_debug("%s: clk %s can not be parent of clk %s\n",
1965                                 __func__, parent->name, core->name);
1966                        return NULL;
1967                }
1968        }
1969
1970        if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1971            best_parent_rate != parent->rate)
1972                top = clk_calc_new_rates(parent, best_parent_rate);
1973
1974out:
1975        clk_calc_subtree(core, new_rate, parent, p_index);
1976
1977        return top;
1978}
1979
1980/*
1981 * Notify about rate changes in a subtree. Always walk down the whole tree
1982 * so that in case of an error we can walk down the whole tree again and
1983 * abort the change.
1984 */
1985static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1986                                                  unsigned long event)
1987{
1988        struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1989        int ret = NOTIFY_DONE;
1990
1991        if (core->rate == core->new_rate)
1992                return NULL;
1993
1994        if (core->notifier_count) {
1995                ret = __clk_notify(core, event, core->rate, core->new_rate);
1996                if (ret & NOTIFY_STOP_MASK)
1997                        fail_clk = core;
1998        }
1999
2000        hlist_for_each_entry(child, &core->children, child_node) {
2001                /* Skip children who will be reparented to another clock */
2002                if (child->new_parent && child->new_parent != core)
2003                        continue;
2004                tmp_clk = clk_propagate_rate_change(child, event);
2005                if (tmp_clk)
2006                        fail_clk = tmp_clk;
2007        }
2008
2009        /* handle the new child who might not be in core->children yet */
2010        if (core->new_child) {
2011                tmp_clk = clk_propagate_rate_change(core->new_child, event);
2012                if (tmp_clk)
2013                        fail_clk = tmp_clk;
2014        }
2015
2016        return fail_clk;
2017}
2018
2019/*
2020 * Walk down a subtree and set the new rates, firing off the rate
2021 * change notifications on the way.
2022 */
2023static void clk_change_rate(struct clk_core *core)
2024{
2025        struct clk_core *child;
2026        struct hlist_node *tmp;
2027        unsigned long old_rate;
2028        unsigned long best_parent_rate = 0;
2029        bool skip_set_rate = false;
2030        struct clk_core *old_parent;
2031        struct clk_core *parent = NULL;
2032
2033        old_rate = core->rate;
2034
2035        if (core->new_parent) {
2036                parent = core->new_parent;
2037                best_parent_rate = core->new_parent->rate;
2038        } else if (core->parent) {
2039                parent = core->parent;
2040                best_parent_rate = core->parent->rate;
2041        }
2042
2043        if (clk_pm_runtime_get(core))
2044                return;
2045
2046        if (core->flags & CLK_SET_RATE_UNGATE) {
2047                unsigned long flags;
2048
2049                clk_core_prepare(core);
2050                flags = clk_enable_lock();
2051                clk_core_enable(core);
2052                clk_enable_unlock(flags);
2053        }
2054
2055        if (core->new_parent && core->new_parent != core->parent) {
2056                old_parent = __clk_set_parent_before(core, core->new_parent);
2057                trace_clk_set_parent(core, core->new_parent);
2058
2059                if (core->ops->set_rate_and_parent) {
2060                        skip_set_rate = true;
2061                        core->ops->set_rate_and_parent(core->hw, core->new_rate,
2062                                        best_parent_rate,
2063                                        core->new_parent_index);
2064                } else if (core->ops->set_parent) {
2065                        core->ops->set_parent(core->hw, core->new_parent_index);
2066                }
2067
2068                trace_clk_set_parent_complete(core, core->new_parent);
2069                __clk_set_parent_after(core, core->new_parent, old_parent);
2070        }
2071
2072        if (core->flags & CLK_OPS_PARENT_ENABLE)
2073                clk_core_prepare_enable(parent);
2074
2075        trace_clk_set_rate(core, core->new_rate);
2076
2077        if (!skip_set_rate && core->ops->set_rate)
2078                core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2079
2080        trace_clk_set_rate_complete(core, core->new_rate);
2081
2082        core->rate = clk_recalc(core, best_parent_rate);
2083
2084        if (core->flags & CLK_SET_RATE_UNGATE) {
2085                unsigned long flags;
2086
2087                flags = clk_enable_lock();
2088                clk_core_disable(core);
2089                clk_enable_unlock(flags);
2090                clk_core_unprepare(core);
2091        }
2092
2093        if (core->flags & CLK_OPS_PARENT_ENABLE)
2094                clk_core_disable_unprepare(parent);
2095
2096        if (core->notifier_count && old_rate != core->rate)
2097                __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2098
2099        if (core->flags & CLK_RECALC_NEW_RATES)
2100                (void)clk_calc_new_rates(core, core->new_rate);
2101
2102        /*
2103         * Use safe iteration, as change_rate can actually swap parents
2104         * for certain clock types.
2105         */
2106        hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2107                /* Skip children who will be reparented to another clock */
2108                if (child->new_parent && child->new_parent != core)
2109                        continue;
2110                clk_change_rate(child);
2111        }
2112
2113        /* handle the new child who might not be in core->children yet */
2114        if (core->new_child)
2115                clk_change_rate(core->new_child);
2116
2117        clk_pm_runtime_put(core);
2118}
2119
2120static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2121                                                     unsigned long req_rate)
2122{
2123        int ret, cnt;
2124        struct clk_rate_request req;
2125
2126        lockdep_assert_held(&prepare_lock);
2127
2128        if (!core)
2129                return 0;
2130
2131        /* simulate what the rate would be if it could be freely set */
2132        cnt = clk_core_rate_nuke_protect(core);
2133        if (cnt < 0)
2134                return cnt;
2135
2136        clk_core_get_boundaries(core, &req.min_rate, &req.max_rate);
2137        req.rate = req_rate;
2138
2139        ret = clk_core_round_rate_nolock(core, &req);
2140
2141        /* restore the protection */
2142        clk_core_rate_restore_protect(core, cnt);
2143
2144        return ret ? 0 : req.rate;
2145}
2146
2147static int clk_core_set_rate_nolock(struct clk_core *core,
2148                                    unsigned long req_rate)
2149{
2150        struct clk_core *top, *fail_clk;
2151        unsigned long rate;
2152        int ret = 0;
2153
2154        if (!core)
2155                return 0;
2156
2157        rate = clk_core_req_round_rate_nolock(core, req_rate);
2158
2159        /* bail early if nothing to do */
2160        if (rate == clk_core_get_rate_nolock(core))
2161                return 0;
2162
2163        /* fail on a direct rate set of a protected provider */
2164        if (clk_core_rate_is_protected(core))
2165                return -EBUSY;
2166
2167        /* calculate new rates and get the topmost changed clock */
2168        top = clk_calc_new_rates(core, req_rate);
2169        if (!top)
2170                return -EINVAL;
2171
2172        ret = clk_pm_runtime_get(core);
2173        if (ret)
2174                return ret;
2175
2176        /* notify that we are about to change rates */
2177        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2178        if (fail_clk) {
2179                pr_debug("%s: failed to set %s rate\n", __func__,
2180                                fail_clk->name);
2181                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2182                ret = -EBUSY;
2183                goto err;
2184        }
2185
2186        /* change the rates */
2187        clk_change_rate(top);
2188
2189        core->req_rate = req_rate;
2190err:
2191        clk_pm_runtime_put(core);
2192
2193        return ret;
2194}
2195
2196/**
2197 * clk_set_rate - specify a new rate for clk
2198 * @clk: the clk whose rate is being changed
2199 * @rate: the new rate for clk
2200 *
2201 * In the simplest case clk_set_rate will only adjust the rate of clk.
2202 *
2203 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2204 * propagate up to clk's parent; whether or not this happens depends on the
2205 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
2206 * after calling .round_rate then upstream parent propagation is ignored.  If
2207 * *parent_rate comes back with a new rate for clk's parent then we propagate
2208 * up to clk's parent and set its rate.  Upward propagation will continue
2209 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2210 * .round_rate stops requesting changes to clk's parent_rate.
2211 *
2212 * Rate changes are accomplished via tree traversal that also recalculates the
2213 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2214 *
2215 * Returns 0 on success, a negative errno otherwise.
2216 */
2217int clk_set_rate(struct clk *clk, unsigned long rate)
2218{
2219        int ret;
2220
2221        if (!clk)
2222                return 0;
2223
2224        /* prevent racing with updates to the clock topology */
2225        clk_prepare_lock();
2226
2227        if (clk->exclusive_count)
2228                clk_core_rate_unprotect(clk->core);
2229
2230        ret = clk_core_set_rate_nolock(clk->core, rate);
2231
2232        if (clk->exclusive_count)
2233                clk_core_rate_protect(clk->core);
2234
2235        clk_prepare_unlock();
2236
2237        return ret;
2238}
2239EXPORT_SYMBOL_GPL(clk_set_rate);
2240
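/*
 * Editor's illustration, not part of the kernel source: a consumer that cares
 * about the exact outcome can ask clk_round_rate() first, which exercises the
 * same round/determine path without touching the hardware. The function name
 * and target rate are hypothetical.
 */
static int __maybe_unused example_set_pixel_clock(struct clk *clk,
                                                  unsigned long target)
{
        long rounded = clk_round_rate(clk, target);

        if (rounded < 0)
                return rounded;

        return clk_set_rate(clk, rounded);
}
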
2241/**
2242 * clk_set_rate_exclusive - specify a new rate and get exclusive control
2243 * @clk: the clk whose rate is being changed
2244 * @rate: the new rate for clk
2245 *
2246 * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2247 * within a critical section.
2248 *
2249 * This can be used initially to ensure that at least 1 consumer is
2250 * satisfied when several consumers are competing for exclusivity over the
2251 * same clock provider.
2252 *
2253 * The exclusivity is not applied if setting the rate failed.
2254 *
2255 * Calls to clk_rate_exclusive_get() should be balanced with calls to
2256 * clk_rate_exclusive_put().
2257 *
2258 * Returns 0 on success, a negative errno otherwise.
2259 */
2260int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2261{
2262        int ret;
2263
2264        if (!clk)
2265                return 0;
2266
2267        /* prevent racing with updates to the clock topology */
2268        clk_prepare_lock();
2269
2270        /*
2271         * The temporary protection removal is deliberately absent here:
2272         * this function is meant to be used instead of clk_rate_protect(),
2273         * protecting the clock provider before the consumer code path runs.
2274         */
2275
2276        ret = clk_core_set_rate_nolock(clk->core, rate);
2277        if (!ret) {
2278                clk_core_rate_protect(clk->core);
2279                clk->exclusive_count++;
2280        }
2281
2282        clk_prepare_unlock();
2283
2284        return ret;
2285}
2286EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
2287
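/*
 * Editor's illustration, not part of the kernel source: the exclusivity taken
 * by a successful clk_set_rate_exclusive() must eventually be dropped with
 * clk_rate_exclusive_put(). A hypothetical audio driver pinning its master
 * clock could do:
 */
static int __maybe_unused example_pin_mclk(struct clk *mclk)
{
        int ret;

        ret = clk_set_rate_exclusive(mclk, 24576000);
        if (ret)
                return ret;

        /* ... stream audio while others cannot change the rate ... */

        clk_rate_exclusive_put(mclk);
        return 0;
}
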
2288/**
2289 * clk_set_rate_range - set a rate range for a clock source
2290 * @clk: clock source
2291 * @min: desired minimum clock rate in Hz, inclusive
2292 * @max: desired maximum clock rate in Hz, inclusive
2293 *
2294 * Returns success (0) or negative errno.
2295 */
2296int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2297{
2298        int ret = 0;
2299        unsigned long old_min, old_max, rate;
2300
2301        if (!clk)
2302                return 0;
2303
2304        if (min > max) {
2305                pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2306                       __func__, clk->core->name, clk->dev_id, clk->con_id,
2307                       min, max);
2308                return -EINVAL;
2309        }
2310
2311        clk_prepare_lock();
2312
2313        if (clk->exclusive_count)
2314                clk_core_rate_unprotect(clk->core);
2315
2316        /* Save the current values in case we need to rollback the change */
2317        old_min = clk->min_rate;
2318        old_max = clk->max_rate;
2319        clk->min_rate = min;
2320        clk->max_rate = max;
2321
2322        rate = clk_core_get_rate_nolock(clk->core);
2323        if (rate < min || rate > max) {
2324                /*
2325                 * FIXME:
2326                 * We are in a bit of trouble here: the current rate is outside
2327                 * the requested range. We will try to request an appropriate
2328                 * range boundary, but there is a catch. It may fail for the
2329                 * usual reasons (clock broken, clock protected, etc.) but also
2330                 * because:
2331                 * - round_rate() was not favorable and fell on the wrong
2332                 *   side of the boundary
2333                 * - the determine_rate() callback does not really check for
2334                 *   this corner case when determining the rate
2335                 */
2336
2337                if (rate < min)
2338                        rate = min;
2339                else
2340                        rate = max;
2341
2342                ret = clk_core_set_rate_nolock(clk->core, rate);
2343                if (ret) {
2344                        /* rollback the changes */
2345                        clk->min_rate = old_min;
2346                        clk->max_rate = old_max;
2347                }
2348        }
2349
2350        if (clk->exclusive_count)
2351                clk_core_rate_protect(clk->core);
2352
2353        clk_prepare_unlock();
2354
2355        return ret;
2356}
2357EXPORT_SYMBOL_GPL(clk_set_rate_range);
2358
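/*
 * Editor's illustration, not part of the kernel source: constraining a clock
 * instead of forcing one rate. Boundaries are stored per struct clk user, so
 * other consumers of the same hardware clock may request different ranges.
 * The 100-200 MHz window is an arbitrary example; the clk_set_min_rate() and
 * clk_set_max_rate() wrappers below adjust one boundary at a time.
 */
static int __maybe_unused example_constrain_bus(struct clk *bus)
{
        return clk_set_rate_range(bus, 100000000, 200000000);
}
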
2359/**
2360 * clk_set_min_rate - set a minimum clock rate for a clock source
2361 * @clk: clock source
2362 * @rate: desired minimum clock rate in Hz, inclusive
2363 *
2364 * Returns success (0) or negative errno.
2365 */
2366int clk_set_min_rate(struct clk *clk, unsigned long rate)
2367{
2368        if (!clk)
2369                return 0;
2370
2371        return clk_set_rate_range(clk, rate, clk->max_rate);
2372}
2373EXPORT_SYMBOL_GPL(clk_set_min_rate);
2374
2375/**
2376 * clk_set_max_rate - set a maximum clock rate for a clock source
2377 * @clk: clock source
2378 * @rate: desired maximum clock rate in Hz, inclusive
2379 *
2380 * Returns success (0) or negative errno.
2381 */
2382int clk_set_max_rate(struct clk *clk, unsigned long rate)
2383{
2384        if (!clk)
2385                return 0;
2386
2387        return clk_set_rate_range(clk, clk->min_rate, rate);
2388}
2389EXPORT_SYMBOL_GPL(clk_set_max_rate);
2390
2391/**
2392 * clk_get_parent - return the parent of a clk
2393 * @clk: the clk whose parent gets returned
2394 *
2395 * Simply returns the parent of clk.  Returns NULL if clk is NULL or has no parent.
2396 */
2397struct clk *clk_get_parent(struct clk *clk)
2398{
2399        struct clk *parent;
2400
2401        if (!clk)
2402                return NULL;
2403
2404        clk_prepare_lock();
2405        /* TODO: Create a per-user clk and change callers to call clk_put */
2406        parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2407        clk_prepare_unlock();
2408
2409        return parent;
2410}
2411EXPORT_SYMBOL_GPL(clk_get_parent);
2412
2413static struct clk_core *__clk_init_parent(struct clk_core *core)
2414{
2415        u8 index = 0;
2416
2417        if (core->num_parents > 1 && core->ops->get_parent)
2418                index = core->ops->get_parent(core->hw);
2419
2420        return clk_core_get_parent_by_index(core, index);
2421}
2422
2423static void clk_core_reparent(struct clk_core *core,
2424                                  struct clk_core *new_parent)
2425{
2426        clk_reparent(core, new_parent);
2427        __clk_recalc_accuracies(core);
2428        __clk_recalc_rates(core, POST_RATE_CHANGE);
2429}
2430
2431void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2432{
2433        if (!hw)
2434                return;
2435
2436        clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2437}
2438
2439/**
2440 * clk_has_parent - check if a clock is a possible parent for another
2441 * @clk: clock source
2442 * @parent: parent clock source
2443 *
2444 * This function can be used in drivers that need to check that a clock can be
2445 * the parent of another without actually changing the parent.
2446 *
2447 * Returns true if @parent is a possible parent for @clk, false otherwise.
2448 */
2449bool clk_has_parent(struct clk *clk, struct clk *parent)
2450{
2451        struct clk_core *core, *parent_core;
2452        int i;
2453
2454        /* NULL clocks should be nops, so return success if either is NULL. */
2455        if (!clk || !parent)
2456                return true;
2457
2458        core = clk->core;
2459        parent_core = parent->core;
2460
2461        /* Optimize for the case where the parent is already the parent. */
2462        if (core->parent == parent_core)
2463                return true;
2464
2465        for (i = 0; i < core->num_parents; i++)
2466                if (!strcmp(core->parents[i].name, parent_core->name))
2467                        return true;
2468
2469        return false;
2470}
2471EXPORT_SYMBOL_GPL(clk_has_parent);
2472
2473static int clk_core_set_parent_nolock(struct clk_core *core,
2474                                      struct clk_core *parent)
2475{
2476        int ret = 0;
2477        int p_index = 0;
2478        unsigned long p_rate = 0;
2479
2480        lockdep_assert_held(&prepare_lock);
2481
2482        if (!core)
2483                return 0;
2484
2485        if (core->parent == parent)
2486                return 0;
2487
2488        /* verify ops for multi-parent clks */
2489        if (core->num_parents > 1 && !core->ops->set_parent)
2490                return -EPERM;
2491
2492        /* check that we are allowed to re-parent if the clock is in use */
2493        if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2494                return -EBUSY;
2495
2496        if (clk_core_rate_is_protected(core))
2497                return -EBUSY;
2498
2499        /* try finding the new parent index */
2500        if (parent) {
2501                p_index = clk_fetch_parent_index(core, parent);
2502                if (p_index < 0) {
2503                        pr_debug("%s: clk %s can not be parent of clk %s\n",
2504                                        __func__, parent->name, core->name);
2505                        return p_index;
2506                }
2507                p_rate = parent->rate;
2508        }
2509
2510        ret = clk_pm_runtime_get(core);
2511        if (ret)
2512                return ret;
2513
2514        /* propagate PRE_RATE_CHANGE notifications */
2515        ret = __clk_speculate_rates(core, p_rate);
2516
2517        /* abort if a driver objects */
2518        if (ret & NOTIFY_STOP_MASK)
2519                goto runtime_put;
2520
2521        /* do the re-parent */
2522        ret = __clk_set_parent(core, parent, p_index);
2523
2524        /* propagate rate and accuracy recalculation accordingly */
2525        if (ret) {
2526                __clk_recalc_rates(core, ABORT_RATE_CHANGE);
2527        } else {
2528                __clk_recalc_rates(core, POST_RATE_CHANGE);
2529                __clk_recalc_accuracies(core);
2530        }
2531
2532runtime_put:
2533        clk_pm_runtime_put(core);
2534
2535        return ret;
2536}
2537
2538int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2539{
2540        return clk_core_set_parent_nolock(hw->core, parent->core);
2541}
2542EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2543
2544/**
2545 * clk_set_parent - switch the parent of a mux clk
2546 * @clk: the mux clk whose input we are switching
2547 * @parent: the new input to clk
2548 *
2549 * Re-parent clk to use parent as its new input source.  If clk is in the
2550 * prepared state, the clk will get enabled for the duration of this call. If
2551 * that's not acceptable for a specific clk (e.g. the consumer can't handle
2552 * that, or the reparenting is glitchy in hardware), use the
2553 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2554 *
2555 * After successfully changing clk's parent clk_set_parent will update the
2556 * clk topology, sysfs topology and propagate rate recalculation via
2557 * __clk_recalc_rates.
2558 *
2559 * Returns 0 on success, a negative errno otherwise.
2560 */
2561int clk_set_parent(struct clk *clk, struct clk *parent)
2562{
2563        int ret;
2564
2565        if (!clk)
2566                return 0;
2567
2568        clk_prepare_lock();
2569
2570        if (clk->exclusive_count)
2571                clk_core_rate_unprotect(clk->core);
2572
2573        ret = clk_core_set_parent_nolock(clk->core,
2574                                         parent ? parent->core : NULL);
2575
2576        if (clk->exclusive_count)
2577                clk_core_rate_protect(clk->core);
2578
2579        clk_prepare_unlock();
2580
2581        return ret;
2582}
2583EXPORT_SYMBOL_GPL(clk_set_parent);
2584
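/*
 * Editor's illustration, not part of the kernel source: a consumer switching
 * a mux input would typically validate the candidate with clk_has_parent()
 * before calling clk_set_parent(). Both handles are hypothetical.
 */
static int __maybe_unused example_switch_mux(struct clk *mux, struct clk *pll)
{
        if (!clk_has_parent(mux, pll))
                return -EINVAL;

        return clk_set_parent(mux, pll);
}
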
2585static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2586{
2587        int ret = -EINVAL;
2588
2589        lockdep_assert_held(&prepare_lock);
2590
2591        if (!core)
2592                return 0;
2593
2594        if (clk_core_rate_is_protected(core))
2595                return -EBUSY;
2596
2597        trace_clk_set_phase(core, degrees);
2598
2599        if (core->ops->set_phase) {
2600                ret = core->ops->set_phase(core->hw, degrees);
2601                if (!ret)
2602                        core->phase = degrees;
2603        }
2604
2605        trace_clk_set_phase_complete(core, degrees);
2606
2607        return ret;
2608}
2609
2610/**
2611 * clk_set_phase - adjust the phase shift of a clock signal
2612 * @clk: clock signal source
2613 * @degrees: number of degrees the signal is shifted
2614 *
2615 * Shifts the phase of a clock signal by the specified
2616 * degrees. Returns 0 on success, a negative errno otherwise.
2617 *
2618 * This function makes no distinction about the input or reference
2619 * signal that we adjust the clock signal phase against. For example, in
2620 * phase-locked loop clock signal generators we may shift phase with
2621 * respect to the feedback clock signal input, but in other cases the
2622 * clock phase may be shifted with respect to some other, unspecified
2623 * signal.
2624 *
2625 * Additionally the concept of phase shift does not propagate through
2626 * the clock tree hierarchy, which sets it apart from clock rates and
2627 * clock accuracy. A parent clock phase attribute does not have an
2628 * impact on the phase attribute of a child clock.
2629 */
2630int clk_set_phase(struct clk *clk, int degrees)
2631{
2632        int ret;
2633
2634        if (!clk)
2635                return 0;
2636
2637        /* sanity check degrees */
2638        degrees %= 360;
2639        if (degrees < 0)
2640                degrees += 360;
2641
2642        clk_prepare_lock();
2643
2644        if (clk->exclusive_count)
2645                clk_core_rate_unprotect(clk->core);
2646
2647        ret = clk_core_set_phase_nolock(clk->core, degrees);
2648
2649        if (clk->exclusive_count)
2650                clk_core_rate_protect(clk->core);
2651
2652        clk_prepare_unlock();
2653
2654        return ret;
2655}
2656EXPORT_SYMBOL_GPL(clk_set_phase);
2657
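/*
 * Editor's illustration, not part of the kernel source: since clk_set_phase()
 * normalizes its argument into [0, 360), a hypothetical consumer wanting a
 * quarter-period delay may pass -90 and 270 interchangeably.
 */
static int __maybe_unused example_delay_sample_clock(struct clk *clk)
{
        return clk_set_phase(clk, -90); /* equivalent to 270 degrees */
}
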
2658static int clk_core_get_phase(struct clk_core *core)
2659{
2660        int ret;
2661
2662        lockdep_assert_held(&prepare_lock);
2663        if (!core->ops->get_phase)
2664                return 0;
2665
2666        /* Always try to update cached phase if possible */
2667        ret = core->ops->get_phase(core->hw);
2668        if (ret >= 0)
2669                core->phase = ret;
2670
2671        return ret;
2672}
2673
2674/**
2675 * clk_get_phase - return the phase shift of a clock signal
2676 * @clk: clock signal source
2677 *
2678 * Returns the phase shift of a clock node in degrees, or a negative
2679 * errno otherwise.
2680 */
2681int clk_get_phase(struct clk *clk)
2682{
2683        int ret;
2684
2685        if (!clk)
2686                return 0;
2687
2688        clk_prepare_lock();
2689        ret = clk_core_get_phase(clk->core);
2690        clk_prepare_unlock();
2691
2692        return ret;
2693}
2694EXPORT_SYMBOL_GPL(clk_get_phase);
2695
2696static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
2697{
2698        /* Assume a default value of 50% */
2699        core->duty.num = 1;
2700        core->duty.den = 2;
2701}
2702
2703static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
2704
2705static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
2706{
2707        struct clk_duty *duty = &core->duty;
2708        int ret = 0;
2709
2710        if (!core->ops->get_duty_cycle)
2711                return clk_core_update_duty_cycle_parent_nolock(core);
2712
2713        ret = core->ops->get_duty_cycle(core->hw, duty);
2714        if (ret)
2715                goto reset;
2716
2717        /* Don't trust the clock provider too much */
2718        if (duty->den == 0 || duty->num > duty->den) {
2719                ret = -EINVAL;
2720                goto reset;
2721        }
2722
2723        return 0;
2724
2725reset:
2726        clk_core_reset_duty_cycle_nolock(core);
2727        return ret;
2728}
2729
2730static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
2731{
2732        int ret = 0;
2733
2734        if (core->parent &&
2735            core->flags & CLK_DUTY_CYCLE_PARENT) {
2736                ret = clk_core_update_duty_cycle_nolock(core->parent);
2737                memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2738        } else {
2739                clk_core_reset_duty_cycle_nolock(core);
2740        }
2741
2742        return ret;
2743}
2744
2745static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2746                                                 struct clk_duty *duty);
2747
2748static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
2749                                          struct clk_duty *duty)
2750{
2751        int ret;
2752
2753        lockdep_assert_held(&prepare_lock);
2754
2755        if (clk_core_rate_is_protected(core))
2756                return -EBUSY;
2757
2758        trace_clk_set_duty_cycle(core, duty);
2759
2760        if (!core->ops->set_duty_cycle)
2761                return clk_core_set_duty_cycle_parent_nolock(core, duty);
2762
2763        ret = core->ops->set_duty_cycle(core->hw, duty);
2764        if (!ret)
2765                memcpy(&core->duty, duty, sizeof(*duty));
2766
2767        trace_clk_set_duty_cycle_complete(core, duty);
2768
2769        return ret;
2770}
2771
2772static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
2773                                                 struct clk_duty *duty)
2774{
2775        int ret = 0;
2776
2777        if (core->parent &&
2778            core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
2779                ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
2780                memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
2781        }
2782
2783        return ret;
2784}
2785
2786/**
2787 * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
2788 * @clk: clock signal source
2789 * @num: numerator of the duty cycle ratio to be applied
2790 * @den: denominator of the duty cycle ratio to be applied
2791 *
2792 * Apply the duty cycle ratio if the ratio is valid and the clock can
2793 * perform this operation.
2794 *
2795 * Returns 0 on success, a negative errno otherwise.
2796 */
2797int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
2798{
2799        int ret;
2800        struct clk_duty duty;
2801
2802        if (!clk)
2803                return 0;
2804
2805        /* sanity check the ratio */
2806        if (den == 0 || num > den)
2807                return -EINVAL;
2808
2809        duty.num = num;
2810        duty.den = den;
2811
2812        clk_prepare_lock();
2813
2814        if (clk->exclusive_count)
2815                clk_core_rate_unprotect(clk->core);
2816
2817        ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
2818
2819        if (clk->exclusive_count)
2820                clk_core_rate_protect(clk->core);
2821
2822        clk_prepare_unlock();
2823
2824        return ret;
2825}
2826EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
2827
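/*
 * Editor's illustration, not part of the kernel source: a hypothetical
 * consumer requesting a 1/4 duty ratio and reading it back as a percentage
 * via clk_get_scaled_duty_cycle() with scale = 100.
 */
static int __maybe_unused example_quarter_duty(struct clk *clk)
{
        int ret;

        ret = clk_set_duty_cycle(clk, 1, 4);
        if (ret)
                return ret;

        /* expect 25 if the provider applied the ratio exactly */
        return clk_get_scaled_duty_cycle(clk, 100);
}
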
2828static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
2829                                          unsigned int scale)
2830{
2831        struct clk_duty *duty = &core->duty;
2832        int ret;
2833
2834        clk_prepare_lock();
2835
2836        ret = clk_core_update_duty_cycle_nolock(core);
2837        if (!ret)
2838                ret = mult_frac(scale, duty->num, duty->den);
2839
2840        clk_prepare_unlock();
2841
2842        return ret;
2843}
2844
2845/**
2846 * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
2847 * @clk: clock signal source
2848 * @scale: scaling factor to be applied to represent the ratio as an integer
2849 *
2850 * Returns the duty cycle ratio of a clock node multiplied by the provided
2851 * scaling factor, or negative errno on error.
2852 */
2853int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
2854{
2855        if (!clk)
2856                return 0;
2857
2858        return clk_core_get_scaled_duty_cycle(clk->core, scale);
2859}
2860EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
2861
2862/**
2863 * clk_is_match - check if two clk's point to the same hardware clock
2864 * @p: clk compared against q
2865 * @q: clk compared against p
2866 *
2867 * Returns true if the two struct clk pointers both point to the same hardware
2868 * clock node. Put differently, returns true if struct clk *p and struct clk *q
2869 * share the same struct clk_core object.
2870 *
2871 * Returns false otherwise. Note that two NULL clks are treated as matching.
2872 */
2873bool clk_is_match(const struct clk *p, const struct clk *q)
2874{
2875        /* trivial case: identical struct clk's or both NULL */
2876        if (p == q)
2877                return true;
2878
2879        /* true if clk->core pointers match. Avoid dereferencing garbage */
2880        if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
2881                if (p->core == q->core)
2882                        return true;
2883
2884        return false;
2885}
2886EXPORT_SYMBOL_GPL(clk_is_match);
2887
2888/***        debugfs support        ***/
2889
2890#ifdef CONFIG_DEBUG_FS
2891#include <linux/debugfs.h>
2892
2893static struct dentry *rootdir;
2894static int inited = 0;
2895static DEFINE_MUTEX(clk_debug_lock);
2896static HLIST_HEAD(clk_debug_list);
2897
2898static struct hlist_head *orphan_list[] = {
2899        &clk_orphan_list,
2900        NULL,
2901};
2902
2903static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
2904                                 int level)
2905{
2906        int phase;
2907
2908        seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ",
2909                   level * 3 + 1, "",
2910                   30 - level * 3, c->name,
2911                   c->enable_count, c->prepare_count, c->protect_count,
2912                   clk_core_get_rate_recalc(c),
2913                   clk_core_get_accuracy_recalc(c));
2914
2915        phase = clk_core_get_phase(c);
2916        if (phase >= 0)
2917                seq_printf(s, "%5d", phase);
2918        else
2919                seq_puts(s, "-----");
2920
2921        seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
2922}
2923
2924static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
2925                                     int level)
2926{
2927        struct clk_core *child;
2928
2929        clk_summary_show_one(s, c, level);
2930
2931        hlist_for_each_entry(child, &c->children, child_node)
2932                clk_summary_show_subtree(s, child, level + 1);
2933}
2934
2935static int clk_summary_show(struct seq_file *s, void *data)
2936{
2937        struct clk_core *c;
2938        struct hlist_head **lists = (struct hlist_head **)s->private;
2939
2940        seq_puts(s, "                                 enable  prepare  protect                                duty\n");
2941        seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle\n");
2942        seq_puts(s, "---------------------------------------------------------------------------------------------\n");
2943
2944        clk_prepare_lock();
2945
2946        for (; *lists; lists++)
2947                hlist_for_each_entry(c, *lists, child_node)
2948                        clk_summary_show_subtree(s, c, 0);
2949
2950        clk_prepare_unlock();
2951
2952        return 0;
2953}
2954DEFINE_SHOW_ATTRIBUTE(clk_summary);
2955
2956static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
2957{
2958        int phase;
2959        unsigned long min_rate, max_rate;
2960
2961        clk_core_get_boundaries(c, &min_rate, &max_rate);
2962
2963        /* This should be JSON format, i.e. elements separated with a comma */
2964        seq_printf(s, "\"%s\": { ", c->name);
2965        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2966        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2967        seq_printf(s, "\"protect_count\": %d,", c->protect_count);
2968        seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
2969        seq_printf(s, "\"min_rate\": %lu,", min_rate);
2970        seq_printf(s, "\"max_rate\": %lu,", max_rate);
2971        seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
2972        phase = clk_core_get_phase(c);
2973        if (phase >= 0)
2974                seq_printf(s, "\"phase\": %d,", phase);
2975        seq_printf(s, "\"duty_cycle\": %u",
2976                   clk_core_get_scaled_duty_cycle(c, 100000));
2977}
2978
2979static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2980{
2981        struct clk_core *child;
2982
2983        clk_dump_one(s, c, level);
2984
2985        hlist_for_each_entry(child, &c->children, child_node) {
2986                seq_putc(s, ',');
2987                clk_dump_subtree(s, child, level + 1);
2988        }
2989
2990        seq_putc(s, '}');
2991}
2992
2993static int clk_dump_show(struct seq_file *s, void *data)
2994{
2995        struct clk_core *c;
2996        bool first_node = true;
2997        struct hlist_head **lists = (struct hlist_head **)s->private;
2998
2999        seq_putc(s, '{');
3000        clk_prepare_lock();
3001
3002        for (; *lists; lists++) {
3003                hlist_for_each_entry(c, *lists, child_node) {
3004                        if (!first_node)
3005                                seq_putc(s, ',');
3006                        first_node = false;
3007                        clk_dump_subtree(s, c, 0);
3008                }
3009        }
3010
3011        clk_prepare_unlock();
3012
3013        seq_puts(s, "}\n");
3014        return 0;
3015}
3016DEFINE_SHOW_ATTRIBUTE(clk_dump);
3017
3018#undef CLOCK_ALLOW_WRITE_DEBUGFS
3019#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3020/*
3021 * This can be dangerous, therefore don't provide any real compile time
3022 * configuration option for this feature.
3023 * People who want to use this will need to modify the source code directly.
3024 */
3025static int clk_rate_set(void *data, u64 val)
3026{
3027        struct clk_core *core = data;
3028        int ret;
3029
3030        clk_prepare_lock();
3031        ret = clk_core_set_rate_nolock(core, val);
3032        clk_prepare_unlock();
3033
3034        return ret;
3035}
3036
3037#define clk_rate_mode   0644
3038#else
3039#define clk_rate_set    NULL
3040#define clk_rate_mode   0444
3041#endif
3042
3043static int clk_rate_get(void *data, u64 *val)
3044{
3045        struct clk_core *core = data;
3046
3047        *val = core->rate;
3048        return 0;
3049}
3050
3051DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3052
3053static const struct {
3054        unsigned long flag;
3055        const char *name;
3056} clk_flags[] = {
3057#define ENTRY(f) { f, #f }
3058        ENTRY(CLK_SET_RATE_GATE),
3059        ENTRY(CLK_SET_PARENT_GATE),
3060        ENTRY(CLK_SET_RATE_PARENT),
3061        ENTRY(CLK_IGNORE_UNUSED),
3062        ENTRY(CLK_GET_RATE_NOCACHE),
3063        ENTRY(CLK_SET_RATE_NO_REPARENT),
3064        ENTRY(CLK_GET_ACCURACY_NOCACHE),
3065        ENTRY(CLK_RECALC_NEW_RATES),
3066        ENTRY(CLK_SET_RATE_UNGATE),
3067        ENTRY(CLK_IS_CRITICAL),
3068        ENTRY(CLK_OPS_PARENT_ENABLE),
3069        ENTRY(CLK_DUTY_CYCLE_PARENT),
3070#undef ENTRY
3071};
3072
3073static int clk_flags_show(struct seq_file *s, void *data)
3074{
3075        struct clk_core *core = s->private;
3076        unsigned long flags = core->flags;
3077        unsigned int i;
3078
3079        for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3080                if (flags & clk_flags[i].flag) {
3081                        seq_printf(s, "%s\n", clk_flags[i].name);
3082                        flags &= ~clk_flags[i].flag;
3083                }
3084        }
3085        if (flags) {
3086                /* Unknown flags */
3087                seq_printf(s, "0x%lx\n", flags);
3088        }
3089
3090        return 0;
3091}
3092DEFINE_SHOW_ATTRIBUTE(clk_flags);
3093
3094static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3095                                 unsigned int i, char terminator)
3096{
3097        struct clk_core *parent;
3098
3099        /*
3100         * Go through the following options to fetch a parent's name.
3101         *
3102         * 1. Fetch the registered parent clock and use its name
3103         * 2. Use the global (fallback) name if specified
3104         * 3. Use the local fw_name if provided
3105         * 4. Fetch parent clock's clock-output-name if DT index was set
3106         *
3107         * This may still fail in some cases, such as when the parent is
3108         * specified directly via a struct clk_hw pointer, but it isn't
3109         * registered (yet).
3110         */
3111        parent = clk_core_get_parent_by_index(core, i);
3112        if (parent)
3113                seq_puts(s, parent->name);
3114        else if (core->parents[i].name)
3115                seq_puts(s, core->parents[i].name);
3116        else if (core->parents[i].fw_name)
3117                seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3118        else if (core->parents[i].index >= 0)
3119                seq_printf(s, "%s",
3120                         of_clk_get_parent_name(core->of_node,
3121                                                core->parents[i].index));
3122        else
3123                seq_puts(s, "(missing)");
3124
3125        seq_putc(s, terminator);
3126}
3127
3128static int possible_parents_show(struct seq_file *s, void *data)
3129{
3130        struct clk_core *core = s->private;
3131        int i;
3132
3133        for (i = 0; i < core->num_parents - 1; i++)
3134                possible_parent_show(s, core, i, ' ');
3135
3136        possible_parent_show(s, core, i, '\n');
3137
3138        return 0;
3139}
3140DEFINE_SHOW_ATTRIBUTE(possible_parents);
3141
3142static int current_parent_show(struct seq_file *s, void *data)
3143{
3144        struct clk_core *core = s->private;
3145
3146        if (core->parent)
3147                seq_printf(s, "%s\n", core->parent->name);
3148
3149        return 0;
3150}
3151DEFINE_SHOW_ATTRIBUTE(current_parent);
3152
3153static int clk_duty_cycle_show(struct seq_file *s, void *data)
3154{
3155        struct clk_core *core = s->private;
3156        struct clk_duty *duty = &core->duty;
3157
3158        seq_printf(s, "%u/%u\n", duty->num, duty->den);
3159
3160        return 0;
3161}
3162DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3163
3164static int clk_min_rate_show(struct seq_file *s, void *data)
3165{
3166        struct clk_core *core = s->private;
3167        unsigned long min_rate, max_rate;
3168
3169        clk_prepare_lock();
3170        clk_core_get_boundaries(core, &min_rate, &max_rate);
3171        clk_prepare_unlock();
3172        seq_printf(s, "%lu\n", min_rate);
3173
3174        return 0;
3175}
3176DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3177
3178static int clk_max_rate_show(struct seq_file *s, void *data)
3179{
3180        struct clk_core *core = s->private;
3181        unsigned long min_rate, max_rate;
3182
3183        clk_prepare_lock();
3184        clk_core_get_boundaries(core, &min_rate, &max_rate);
3185        clk_prepare_unlock();
3186        seq_printf(s, "%lu\n", max_rate);
3187
3188        return 0;
3189}
3190DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3191
3192static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3193{
3194        struct dentry *root;
3195
3196        if (!core || !pdentry)
3197                return;
3198
3199        root = debugfs_create_dir(core->name, pdentry);
3200        core->dentry = root;
3201
3202        debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3203                            &clk_rate_fops);
3204        debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3205        debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3206        debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3207        debugfs_create_u32("clk_phase", 0444, root, &core->phase);
3208        debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3209        debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3210        debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3211        debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3212        debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3213        debugfs_create_file("clk_duty_cycle", 0444, root, core,
3214                            &clk_duty_cycle_fops);
3215
3216        if (core->num_parents > 0)
3217                debugfs_create_file("clk_parent", 0444, root, core,
3218                                    &current_parent_fops);
3219
3220        if (core->num_parents > 1)
3221                debugfs_create_file("clk_possible_parents", 0444, root, core,
3222                                    &possible_parents_fops);
3223
3224        if (core->ops->debug_init)
3225                core->ops->debug_init(core->hw, core->dentry);
3226}
3227
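/*
 * Editor's illustration, not part of the kernel source: a provider can attach
 * extra files to the per-clock dentry through the optional .debug_init hook
 * consumed above. The wrapper struct and its "frac_div" field are invented
 * for the example.
 */
struct example_clk {
        struct clk_hw hw;
        u32 frac_div;
};

static void __maybe_unused example_debug_init(struct clk_hw *hw,
                                              struct dentry *dentry)
{
        struct example_clk *eclk = container_of(hw, struct example_clk, hw);

        debugfs_create_u32("frac_div", 0444, dentry, &eclk->frac_div);
}
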
3228/**
3229 * clk_debug_register - add a clk node to the debugfs clk directory
3230 * @core: the clk being added to the debugfs clk directory
3231 *
3232 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3233 * initialized.  Otherwise it bails out early since the debugfs clk directory
3234 * will be created lazily by clk_debug_init as part of a late_initcall.
3235 */
3236static void clk_debug_register(struct clk_core *core)
3237{
3238        mutex_lock(&clk_debug_lock);
3239        hlist_add_head(&core->debug_node, &clk_debug_list);
3240        if (inited)
3241                clk_debug_create_one(core, rootdir);
3242        mutex_unlock(&clk_debug_lock);
3243}
3244
3245/**
3246 * clk_debug_unregister - remove a clk node from the debugfs clk directory
3247 * @core: the clk being removed from the debugfs clk directory
3248 *
3249 * Dynamically removes a clk and all its child nodes from the
3250 * debugfs clk directory if clk->dentry points to debugfs created by
3251 * clk_debug_register in __clk_core_init.
3252 */
3253static void clk_debug_unregister(struct clk_core *core)
3254{
3255        mutex_lock(&clk_debug_lock);
3256        hlist_del_init(&core->debug_node);
3257        debugfs_remove_recursive(core->dentry);
3258        core->dentry = NULL;
3259        mutex_unlock(&clk_debug_lock);
3260}
3261
3262/**
3263 * clk_debug_init - lazily populate the debugfs clk directory
3264 *
3265 * clks are often initialized very early during boot before memory can be
3266 * dynamically allocated and well before debugfs is set up. This function
3267 * populates the debugfs clk directory once at boot-time when we know that
3268 * debugfs is set up. It should only be called once at boot-time; all clks
3269 * added dynamically afterward will be registered with clk_debug_register.
3270 */
3271static int __init clk_debug_init(void)
3272{
3273        struct clk_core *core;
3274
3275        rootdir = debugfs_create_dir("clk", NULL);
3276
3277        debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3278                            &clk_summary_fops);
3279        debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3280                            &clk_dump_fops);
3281        debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3282                            &clk_summary_fops);
3283        debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3284                            &clk_dump_fops);
3285
3286        mutex_lock(&clk_debug_lock);
3287        hlist_for_each_entry(core, &clk_debug_list, debug_node)
3288                clk_debug_create_one(core, rootdir);
3289
3290        inited = 1;
3291        mutex_unlock(&clk_debug_lock);
3292
3293        return 0;
3294}
3295late_initcall(clk_debug_init);
3296#else
3297static inline void clk_debug_register(struct clk_core *core) { }
3298static inline void clk_debug_reparent(struct clk_core *core,
3299                                      struct clk_core *new_parent)
3300{
3301}
3302static inline void clk_debug_unregister(struct clk_core *core)
3303{
3304}
3305#endif
3306
3307static void clk_core_reparent_orphans_nolock(void)
3308{
3309        struct clk_core *orphan;
3310        struct hlist_node *tmp2;
3311
3312        /*
3313         * Walk the list of orphan clocks and reparent any that have newly
3314         * found a parent.
3315         */
3316        hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3317                struct clk_core *parent = __clk_init_parent(orphan);
3318
3319                /*
3320                 * We need to use __clk_set_parent_before() and _after() to
3321                 * properly migrate any prepare/enable count of the orphan
3322                 * clock. This is important for CLK_IS_CRITICAL clocks, which
3323                 * are enabled during init but might not have a parent yet.
3324                 */
3325                if (parent) {
3326                        /* update the clk tree topology */
3327                        __clk_set_parent_before(orphan, parent);
3328                        __clk_set_parent_after(orphan, parent, NULL);
3329                        __clk_recalc_accuracies(orphan);
3330                        __clk_recalc_rates(orphan, 0);
3331                }
3332        }
3333}
3334
3335/**
3336 * __clk_core_init - initialize the data structures in a struct clk_core
3337 * @core:       clk_core being initialized
3338 *
3339 * Initializes the lists in struct clk_core, queries the hardware for the
3340 * parent and rate and sets them both.
3341 */
3342static int __clk_core_init(struct clk_core *core)
3343{
3344        int ret;
3345        struct clk_core *parent;
3346        unsigned long rate;
3347        int phase;
3348
3349        if (!core)
3350                return -EINVAL;
3351
3352        clk_prepare_lock();
3353
3354        ret = clk_pm_runtime_get(core);
3355        if (ret)
3356                goto unlock;
3357
3358        /* check to see if a clock with this name is already registered */
3359        if (clk_core_lookup(core->name)) {
3360                pr_debug("%s: clk %s already initialized\n",
3361                                __func__, core->name);
3362                ret = -EEXIST;
3363                goto out;
3364        }
3365
3366        /* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3367        if (core->ops->set_rate &&
3368            !((core->ops->round_rate || core->ops->determine_rate) &&
3369              core->ops->recalc_rate)) {
3370                pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3371                       __func__, core->name);
3372                ret = -EINVAL;
3373                goto out;
3374        }
3375
3376        if (core->ops->set_parent && !core->ops->get_parent) {
3377                pr_err("%s: %s must implement .get_parent & .set_parent\n",
3378                       __func__, core->name);
3379                ret = -EINVAL;
3380                goto out;
3381        }
3382
3383        if (core->num_parents > 1 && !core->ops->get_parent) {
3384                pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3385                       __func__, core->name);
3386                ret = -EINVAL;
3387                goto out;
3388        }
3389
3390        if (core->ops->set_rate_and_parent &&
3391                        !(core->ops->set_parent && core->ops->set_rate)) {
3392                pr_err("%s: %s must implement .set_parent & .set_rate\n",
3393                                __func__, core->name);
3394                ret = -EINVAL;
3395                goto out;
3396        }
3397
3398        /*
3399         * optional platform-specific magic
3400         *
3401         * The .init callback is not used by any of the basic clock types, but
3402         * exists for weird hardware that must perform initialization magic for
3403         * CCF to get an accurate view of the clock for any other callbacks.
3404         * It may also be used when the provider needs to perform dynamic
3405         * allocations. Any such allocation must be freed in the terminate()
3406         * callback. This callback shall not be used to initialize the
3407         * parameters' state, such as rate and parent.
3408         *
3409         * If it exists, this callback is called before any other callback
3410         * of the clock.
3411         */
3412        if (core->ops->init) {
3413                ret = core->ops->init(core->hw);
3414                if (ret)
3415                        goto out;
3416        }
3417
3418        parent = core->parent = __clk_init_parent(core);
3419
3420        /*
3421         * Populate core->parent if parent has already been clk_core_init'd. If
3422         * parent has not yet been clk_core_init'd then place clk in the orphan
3423         * list.  If clk doesn't have any parents then place it in the root
3424         * clk list.
3425         *
3426         * Every time a new clk is clk_init'd then we walk the list of orphan
3427         * clocks and re-parent any that are children of the clock currently
3428         * being clk_init'd.
3429         */
3430        if (parent) {
3431                hlist_add_head(&core->child_node, &parent->children);
3432                core->orphan = parent->orphan;
3433        } else if (!core->num_parents) {
3434                hlist_add_head(&core->child_node, &clk_root_list);
3435                core->orphan = false;
3436        } else {
3437                hlist_add_head(&core->child_node, &clk_orphan_list);
3438                core->orphan = true;
3439        }
3440
3441        /*
3442         * Set clk's accuracy.  The preferred method is to use
3443         * .recalc_accuracy. For simple clocks and lazy developers the default
3444         * fallback is to use the parent's accuracy.  If a clock doesn't have a
3445         * parent (or is orphaned) then accuracy is set to zero (perfect
3446         * clock).
3447         */
3448        if (core->ops->recalc_accuracy)
3449                core->accuracy = core->ops->recalc_accuracy(core->hw,
3450                                        clk_core_get_accuracy_no_lock(parent));
3451        else if (parent)
3452                core->accuracy = parent->accuracy;
3453        else
3454                core->accuracy = 0;
3455
3456        /*
3457         * Set clk's phase by clk_core_get_phase() caching the phase.
3458         * Since a phase is by definition relative to its parent, just
3459         * query the current clock phase, or just assume it's in phase.
3460         */
3461        phase = clk_core_get_phase(core);
3462        if (phase < 0) {
3463                ret = phase;
3464                pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
3465                        core->name);
3466                goto out;
3467        }
3468
3469        /*
3470         * Set clk's duty cycle.
3471         */
3472        clk_core_update_duty_cycle_nolock(core);
3473
3474        /*
3475         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
3476         * simple clocks and lazy developers the default fallback is to use the
3477         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
3478         * then rate is set to zero.
3479         */
3480        if (core->ops->recalc_rate)
3481                rate = core->ops->recalc_rate(core->hw,
3482                                clk_core_get_rate_nolock(parent));
3483        else if (parent)
3484                rate = parent->rate;
3485        else
3486                rate = 0;
3487        core->rate = core->req_rate = rate;
3488
3489        /*
3490         * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
3491         * don't get accidentally disabled when walking the orphan tree and
3492         * reparenting clocks
3493         */
3494        if (core->flags & CLK_IS_CRITICAL) {
3495                unsigned long flags;
3496
3497                ret = clk_core_prepare(core);
3498                if (ret) {
3499                        pr_warn("%s: critical clk '%s' failed to prepare\n",
3500                               __func__, core->name);
3501                        goto out;
3502                }
3503
3504                flags = clk_enable_lock();
3505                ret = clk_core_enable(core);
3506                clk_enable_unlock(flags);
3507                if (ret) {
3508                        pr_warn("%s: critical clk '%s' failed to enable\n",
3509                               __func__, core->name);
3510                        clk_core_unprepare(core);
3511                        goto out;
3512                }
3513        }
3514
3515        clk_core_reparent_orphans_nolock();
3516
3518        kref_init(&core->ref);
3519out:
3520        clk_pm_runtime_put(core);
3521unlock:
3522        if (ret)
3523                hlist_del_init(&core->child_node);
3524
3525        clk_prepare_unlock();
3526
3527        if (!ret)
3528                clk_debug_register(core);
3529
3530        return ret;
3531}
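
/*
 * Editorial note: the clk_ops sanity checks in __clk_core_init() above encode
 * the required callback combinations. As a hedged sketch (the foo_* names are
 * hypothetical provider callbacks, not part of this file), a rate-adjustable
 * clock must pair .set_rate with .recalc_rate and either .round_rate or
 * .determine_rate:
 *
 *	static const struct clk_ops foo_ops = {
 *		.recalc_rate	= foo_recalc_rate,
 *		.round_rate	= foo_round_rate,
 *		.set_rate	= foo_set_rate,
 *	};
 *
 * Likewise, .set_parent requires .get_parent, and .set_rate_and_parent
 * requires both .set_rate and .set_parent.
 */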
3532
3533/**
3534 * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
3535 * @core: clk to add consumer to
3536 * @clk: consumer to link to a clk
3537 */
3538static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
3539{
3540        clk_prepare_lock();
3541        hlist_add_head(&clk->clks_node, &core->clks);
3542        clk_prepare_unlock();
3543}
3544
3545/**
3546 * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
3547 * @clk: consumer to unlink
3548 */
3549static void clk_core_unlink_consumer(struct clk *clk)
3550{
3551        lockdep_assert_held(&prepare_lock);
3552        hlist_del(&clk->clks_node);
3553}
3554
3555/**
3556 * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
3557 * @core: clk to allocate a consumer for
3558 * @dev_id: string describing device name
3559 * @con_id: connection ID string on device
3560 *
3561 * Returns: clk consumer left unlinked from the consumer list
3562 */
3563static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
3564                             const char *con_id)
3565{
3566        struct clk *clk;
3567
3568        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
3569        if (!clk)
3570                return ERR_PTR(-ENOMEM);
3571
3572        clk->core = core;
3573        clk->dev_id = dev_id;
3574        clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
3575        clk->max_rate = ULONG_MAX;
3576
3577        return clk;
3578}
3579
3580/**
3581 * free_clk - Free a clk consumer
3582 * @clk: clk consumer to free
3583 *
3584 * Note, this assumes the clk has been unlinked from the clk_core consumer
3585 * list.
3586 */
3587static void free_clk(struct clk *clk)
3588{
3589        kfree_const(clk->con_id);
3590        kfree(clk);
3591}
3592
3593/**
3594 * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
3595 * a clk_hw
3596 * @dev: clk consumer device
3597 * @hw: clk_hw associated with the clk being consumed
3598 * @dev_id: string describing device name
3599 * @con_id: connection ID string on device
3600 *
3601 * This is the main function used to create a clk pointer for use by clk
3602 * consumers. It connects a consumer to the clk_core and clk_hw structures
3603 * used by the framework and clk provider respectively.
3604 */
3605struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
3606                              const char *dev_id, const char *con_id)
3607{
3608        struct clk *clk;
3609        struct clk_core *core;
3610
3611        /* This is to allow this function to be chained to others */
3612        if (IS_ERR_OR_NULL(hw))
3613                return ERR_CAST(hw);
3614
3615        core = hw->core;
3616        clk = alloc_clk(core, dev_id, con_id);
3617        if (IS_ERR(clk))
3618                return clk;
3619        clk->dev = dev;
3620
3621        if (!try_module_get(core->owner)) {
3622                free_clk(clk);
3623                return ERR_PTR(-ENOENT);
3624        }
3625
3626        kref_get(&core->ref);
3627        clk_core_link_consumer(core, clk);
3628
3629        return clk;
3630}
3631
3632static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
3633{
3634        const char *dst;
3635
3636        if (!src) {
3637                if (must_exist)
3638                        return -EINVAL;
3639                return 0;
3640        }
3641
3642        *dst_p = dst = kstrdup_const(src, GFP_KERNEL);
3643        if (!dst)
3644                return -ENOMEM;
3645
3646        return 0;
3647}
3648
3649static int clk_core_populate_parent_map(struct clk_core *core,
3650                                        const struct clk_init_data *init)
3651{
3652        u8 num_parents = init->num_parents;
3653        const char * const *parent_names = init->parent_names;
3654        const struct clk_hw **parent_hws = init->parent_hws;
3655        const struct clk_parent_data *parent_data = init->parent_data;
3656        int i, ret = 0;
3657        struct clk_parent_map *parents, *parent;
3658
3659        if (!num_parents)
3660                return 0;
3661
3662        /*
3663         * Avoid unnecessary string look-ups of clk_core's possible parents by
3664         * having a cache of names/clk_hw pointers to clk_core pointers.
3665         */
3666        parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
3667        core->parents = parents;
3668        if (!parents)
3669                return -ENOMEM;
3670
3671        /* Copy everything over because it might be __initdata */
3672        for (i = 0, parent = parents; i < num_parents; i++, parent++) {
3673                parent->index = -1;
3674                if (parent_names) {
3675                        /* throw a WARN if any entries are NULL */
3676                        WARN(!parent_names[i],
3677                                "%s: invalid NULL in %s's .parent_names\n",
3678                                __func__, core->name);
3679                        ret = clk_cpy_name(&parent->name, parent_names[i],
3680                                           true);
3681                } else if (parent_data) {
3682                        parent->hw = parent_data[i].hw;
3683                        parent->index = parent_data[i].index;
3684                        ret = clk_cpy_name(&parent->fw_name,
3685                                           parent_data[i].fw_name, false);
3686                        if (!ret)
3687                                ret = clk_cpy_name(&parent->name,
3688                                                   parent_data[i].name,
3689                                                   false);
3690                } else if (parent_hws) {
3691                        parent->hw = parent_hws[i];
3692                } else {
3693                        ret = -EINVAL;
3694                        WARN(1, "Must specify parents if num_parents > 0\n");
3695                }
3696
3697                if (ret) {
3698                        do {
3699                                kfree_const(parents[i].name);
3700                                kfree_const(parents[i].fw_name);
3701                        } while (--i >= 0);
3702                        kfree(parents);
3703
3704                        return ret;
3705                }
3706        }
3707
3708        return 0;
3709}
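
/*
 * Editorial note: a hedged sketch of the mutually exclusive ways a provider
 * can describe parents in clk_init_data, which clk_core_populate_parent_map()
 * copies into the cache above (the foo_*, bar_* and "xtal" names are
 * hypothetical):
 *
 *	static const struct clk_parent_data foo_parents[] = {
 *		{ .fw_name = "xtal" },
 *		{ .hw = &bar_hw },
 *	};
 *
 *	static const struct clk_init_data foo_init = {
 *		.name		= "foo",
 *		.ops		= &foo_ops,
 *		.parent_data	= foo_parents,
 *		.num_parents	= ARRAY_SIZE(foo_parents),
 *	};
 *
 * Legacy providers pass an array of strings in .parent_names instead, and
 * providers that already hold clk_hw pointers can pass .parent_hws.
 */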
3710
3711static void clk_core_free_parent_map(struct clk_core *core)
3712{
3713        int i = core->num_parents;
3714
3715        if (!core->num_parents)
3716                return;
3717
3718        while (--i >= 0) {
3719                kfree_const(core->parents[i].name);
3720                kfree_const(core->parents[i].fw_name);
3721        }
3722
3723        kfree(core->parents);
3724}
3725
3726static struct clk *
3727__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
3728{
3729        int ret;
3730        struct clk_core *core;
3731        const struct clk_init_data *init = hw->init;
3732
3733        /*
3734         * The init data is not supposed to be used outside of the registration path.
3735         * Set it to NULL so that provider drivers can't use it either and so that
3736         * we catch use of hw->init early on in the core.
3737         */
3738        hw->init = NULL;
3739
3740        core = kzalloc(sizeof(*core), GFP_KERNEL);
3741        if (!core) {
3742                ret = -ENOMEM;
3743                goto fail_out;
3744        }
3745
3746        core->name = kstrdup_const(init->name, GFP_KERNEL);
3747        if (!core->name) {
3748                ret = -ENOMEM;
3749                goto fail_name;
3750        }
3751
3752        if (WARN_ON(!init->ops)) {
3753                ret = -EINVAL;
3754                goto fail_ops;
3755        }
3756        core->ops = init->ops;
3757
3758        if (dev && pm_runtime_enabled(dev))
3759                core->rpm_enabled = true;
3760        core->dev = dev;
3761        core->of_node = np;
3762        if (dev && dev->driver)
3763                core->owner = dev->driver->owner;
3764        core->hw = hw;
3765        core->flags = init->flags;
3766        core->num_parents = init->num_parents;
3767        core->min_rate = 0;
3768        core->max_rate = ULONG_MAX;
3769        hw->core = core;
3770
3771        ret = clk_core_populate_parent_map(core, init);
3772        if (ret)
3773                goto fail_parents;
3774
3775        INIT_HLIST_HEAD(&core->clks);
3776
3777        /*
3778         * Don't call clk_hw_create_clk() here because that would pin the
3779         * provider module to itself and prevent it from ever being removed.
3780         */
3781        hw->clk = alloc_clk(core, NULL, NULL);
3782        if (IS_ERR(hw->clk)) {
3783                ret = PTR_ERR(hw->clk);
3784                goto fail_create_clk;
3785        }
3786
3787        clk_core_link_consumer(hw->core, hw->clk);
3788
3789        ret = __clk_core_init(core);
3790        if (!ret)
3791                return hw->clk;
3792
3793        clk_prepare_lock();
3794        clk_core_unlink_consumer(hw->clk);
3795        clk_prepare_unlock();
3796
3797        free_clk(hw->clk);
3798        hw->clk = NULL;
3799
3800fail_create_clk:
3801        clk_core_free_parent_map(core);
3802fail_parents:
3803fail_ops:
3804        kfree_const(core->name);
3805fail_name:
3806        kfree(core);
3807fail_out:
3808        return ERR_PTR(ret);
3809}
3810
3811/**
3812 * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
3813 * @dev: Device to get device node of
3814 *
3815 * Return: device node pointer of @dev, or the device node pointer of
3816 * @dev->parent if dev doesn't have a device node, or NULL if neither
3817 * @dev nor @dev->parent has a device node.
3818 */
3819static struct device_node *dev_or_parent_of_node(struct device *dev)
3820{
3821        struct device_node *np;
3822
3823        if (!dev)
3824                return NULL;
3825
3826        np = dev_of_node(dev);
3827        if (!np)
3828                np = dev_of_node(dev->parent);
3829
3830        return np;
3831}
3832
3833/**
3834 * clk_register - allocate a new clock, register it and return an opaque cookie
3835 * @dev: device that is registering this clock
3836 * @hw: link to hardware-specific clock data
3837 *
3838 * clk_register is the *deprecated* interface for populating the clock tree with
3839 * new clock nodes. Use clk_hw_register() instead.
3840 *
3841 * Returns: a pointer to the newly allocated struct clk which
3842 * cannot be dereferenced by driver code but may be used in conjunction with the
3843 * rest of the clock API.  In the event of an error clk_register will return an
3844 * error code; drivers must test for an error code after calling clk_register.
3845 */
3846struct clk *clk_register(struct device *dev, struct clk_hw *hw)
3847{
3848        return __clk_register(dev, dev_or_parent_of_node(dev), hw);
3849}
3850EXPORT_SYMBOL_GPL(clk_register);
3851
3852/**
3853 * clk_hw_register - register a clk_hw and return an error code
3854 * @dev: device that is registering this clock
3855 * @hw: link to hardware-specific clock data
3856 *
3857 * clk_hw_register is the primary interface for populating the clock tree with
3858 * new clock nodes. It returns an integer equal to zero indicating success or
3859 * less than zero indicating failure. Drivers must test for an error code after
3860 * calling clk_hw_register().
3861 */
3862int clk_hw_register(struct device *dev, struct clk_hw *hw)
3863{
3864        return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
3865                               hw));
3866}
3867EXPORT_SYMBOL_GPL(clk_hw_register);
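
/*
 * Editorial note: a hedged usage sketch for clk_hw_register(); the foo_*
 * names are hypothetical and reuse the clk_init_data sketch above:
 *
 *	static struct clk_hw foo_hw = {
 *		.init = &foo_init,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		return clk_hw_register(&pdev->dev, &foo_hw);
 *	}
 *
 * On success the framework has allocated the backing clk_core and a consumer
 * handle in foo_hw.clk; the call must be balanced with clk_hw_unregister()
 * on driver removal.
 */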
3868
3869/**
3870 * of_clk_hw_register - register a clk_hw and return an error code
3871 * @node: device_node of device that is registering this clock
3872 * @hw: link to hardware-specific clock data
3873 *
3874 * of_clk_hw_register() is the primary interface for populating the clock tree
3875 * with new clock nodes when a struct device is not available, but a struct
3876 * device_node is. It returns an integer equal to zero indicating success or
3877 * less than zero indicating failure. Drivers must test for an error code after
3878 * calling of_clk_hw_register().
3879 */
3880int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
3881{
3882        return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
3883}
3884EXPORT_SYMBOL_GPL(of_clk_hw_register);
3885
3886/* Free memory allocated for a clock. */
3887static void __clk_release(struct kref *ref)
3888{
3889        struct clk_core *core = container_of(ref, struct clk_core, ref);
3890
3891        lockdep_assert_held(&prepare_lock);
3892
3893        clk_core_free_parent_map(core);
3894        kfree_const(core->name);
3895        kfree(core);
3896}
3897
3898/*
3899 * Empty clk_ops for unregistered clocks. These are used temporarily
3900 * after clk_unregister() was called on a clock and until the last clock
3901 * consumer calls clk_put() and the struct clk object is freed.
3902 */
3903static int clk_nodrv_prepare_enable(struct clk_hw *hw)
3904{
3905        return -ENXIO;
3906}
3907
3908static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
3909{
3910        WARN_ON_ONCE(1);
3911}
3912
3913static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
3914                                        unsigned long parent_rate)
3915{
3916        return -ENXIO;
3917}
3918
3919static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
3920{
3921        return -ENXIO;
3922}
3923
3924static const struct clk_ops clk_nodrv_ops = {
3925        .enable         = clk_nodrv_prepare_enable,
3926        .disable        = clk_nodrv_disable_unprepare,
3927        .prepare        = clk_nodrv_prepare_enable,
3928        .unprepare      = clk_nodrv_disable_unprepare,
3929        .set_rate       = clk_nodrv_set_rate,
3930        .set_parent     = clk_nodrv_set_parent,
3931};
3932
3933static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
3934                                                struct clk_core *target)
3935{
3936        int i;
3937        struct clk_core *child;
3938
3939        for (i = 0; i < root->num_parents; i++)
3940                if (root->parents[i].core == target)
3941                        root->parents[i].core = NULL;
3942
3943        hlist_for_each_entry(child, &root->children, child_node)
3944                clk_core_evict_parent_cache_subtree(child, target);
3945}
3946
3947/* Remove this clk from all parent caches */
3948static void clk_core_evict_parent_cache(struct clk_core *core)
3949{
3950        struct hlist_head **lists;
3951        struct clk_core *root;
3952
3953        lockdep_assert_held(&prepare_lock);
3954
3955        for (lists = all_lists; *lists; lists++)
3956                hlist_for_each_entry(root, *lists, child_node)
3957                        clk_core_evict_parent_cache_subtree(root, core);
3959}
3960
3961/**
3962 * clk_unregister - unregister a currently registered clock
3963 * @clk: clock to unregister
3964 */
3965void clk_unregister(struct clk *clk)
3966{
3967        unsigned long flags;
3968        const struct clk_ops *ops;
3969
3970        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
3971                return;
3972
3973        clk_debug_unregister(clk->core);
3974
3975        clk_prepare_lock();
3976
3977        ops = clk->core->ops;
3978        if (ops == &clk_nodrv_ops) {
3979                pr_err("%s: unregistered clock: %s\n", __func__,
3980                       clk->core->name);
3981                goto unlock;
3982        }
3983        /*
3984         * Assign empty clock ops for consumers that might still hold
3985         * a reference to this clock.
3986         */
3987        flags = clk_enable_lock();
3988        clk->core->ops = &clk_nodrv_ops;
3989        clk_enable_unlock(flags);
3990
3991        if (ops->terminate)
3992                ops->terminate(clk->core->hw);
3993
3994        if (!hlist_empty(&clk->core->children)) {
3995                struct clk_core *child;
3996                struct hlist_node *t;
3997
3998                /* Reparent all children to the orphan list. */
3999                hlist_for_each_entry_safe(child, t, &clk->core->children,
4000                                          child_node)
4001                        clk_core_set_parent_nolock(child, NULL);
4002        }
4003
4004        clk_core_evict_parent_cache(clk->core);
4005
4006        hlist_del_init(&clk->core->child_node);
4007
4008        if (clk->core->prepare_count)
4009                pr_warn("%s: unregistering prepared clock: %s\n",
4010                                        __func__, clk->core->name);
4011
4012        if (clk->core->protect_count)
4013                pr_warn("%s: unregistering protected clock: %s\n",
4014                                        __func__, clk->core->name);
4015
4016        kref_put(&clk->core->ref, __clk_release);
4017        free_clk(clk);
4018unlock:
4019        clk_prepare_unlock();
4020}
4021EXPORT_SYMBOL_GPL(clk_unregister);
4022
4023/**
4024 * clk_hw_unregister - unregister a currently registered clk_hw
4025 * @hw: hardware-specific clock data to unregister
4026 */
4027void clk_hw_unregister(struct clk_hw *hw)
4028{
4029        clk_unregister(hw->clk);
4030}
4031EXPORT_SYMBOL_GPL(clk_hw_unregister);
4032
4033static void devm_clk_release(struct device *dev, void *res)
4034{
4035        clk_unregister(*(struct clk **)res);
4036}
4037
4038static void devm_clk_hw_release(struct device *dev, void *res)
4039{
4040        clk_hw_unregister(*(struct clk_hw **)res);
4041}
4042
4043/**
4044 * devm_clk_register - resource managed clk_register()
4045 * @dev: device that is registering this clock
4046 * @hw: link to hardware-specific clock data
4047 *
4048 * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4049 *
4050 * Clocks returned from this function are automatically clk_unregister()ed on
4051 * driver detach. See clk_register() for more information.
4052 */
4053struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4054{
4055        struct clk *clk;
4056        struct clk **clkp;
4057
4058        clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4059        if (!clkp)
4060                return ERR_PTR(-ENOMEM);
4061
4062        clk = clk_register(dev, hw);
4063        if (!IS_ERR(clk)) {
4064                *clkp = clk;
4065                devres_add(dev, clkp);
4066        } else {
4067                devres_free(clkp);
4068        }
4069
4070        return clk;
4071}
4072EXPORT_SYMBOL_GPL(devm_clk_register);
4073
4074/**
4075 * devm_clk_hw_register - resource managed clk_hw_register()
4076 * @dev: device that is registering this clock
4077 * @hw: link to hardware-specific clock data
4078 *
4079 * Managed clk_hw_register(). Clocks registered by this function are
4080 * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4081 * for more information.
4082 */
4083int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4084{
4085        struct clk_hw **hwp;
4086        int ret;
4087
4088        hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
4089        if (!hwp)
4090                return -ENOMEM;
4091
4092        ret = clk_hw_register(dev, hw);
4093        if (!ret) {
4094                *hwp = hw;
4095                devres_add(dev, hwp);
4096        } else {
4097                devres_free(hwp);
4098        }
4099
4100        return ret;
4101}
4102EXPORT_SYMBOL_GPL(devm_clk_hw_register);
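
/*
 * Editorial note: a hedged sketch of the managed path (hypothetical foo_*
 * names, OF-based provider); devres unwinds both registrations on driver
 * detach, so no remove() counterpart is needed:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		int ret;
 *
 *		ret = devm_clk_hw_register(&pdev->dev, &foo_hw);
 *		if (ret)
 *			return ret;
 *
 *		return devm_of_clk_add_hw_provider(&pdev->dev,
 *						   of_clk_hw_simple_get,
 *						   &foo_hw);
 *	}
 */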
4103
4104static int devm_clk_match(struct device *dev, void *res, void *data)
4105{
4106        struct clk *c = res;
4107        if (WARN_ON(!c))
4108                return 0;
4109        return c == data;
4110}
4111
4112static int devm_clk_hw_match(struct device *dev, void *res, void *data)
4113{
4114        struct clk_hw *hw = res;
4115
4116        if (WARN_ON(!hw))
4117                return 0;
4118        return hw == data;
4119}
4120
4121/**
4122 * devm_clk_unregister - resource managed clk_unregister()
4123 * @dev: device that is unregistering the clock
 * @clk: clock to unregister
4124 *
4125 * Deallocate a clock allocated with devm_clk_register(). Normally
4126 * this function will not need to be called and the resource management
4127 * code will ensure that the resource is freed.
4128 */
4129void devm_clk_unregister(struct device *dev, struct clk *clk)
4130{
4131        WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
4132}
4133EXPORT_SYMBOL_GPL(devm_clk_unregister);
4134
4135/**
4136 * devm_clk_hw_unregister - resource managed clk_hw_unregister()
4137 * @dev: device that is unregistering the hardware-specific clock data
4138 * @hw: link to hardware-specific clock data
4139 *
4140 * Unregister a clk_hw registered with devm_clk_hw_register(). Normally
4141 * this function will not need to be called and the resource management
4142 * code will ensure that the resource is freed.
4143 */
4144void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
4145{
4146        WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
4147                                hw));
4148}
4149EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
4150
4151/*
4152 * clkdev helpers
4153 */
4154
4155void __clk_put(struct clk *clk)
4156{
4157        struct module *owner;
4158
4159        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4160                return;
4161
4162        clk_prepare_lock();
4163
4164        /*
4165         * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4166         * given user should be balanced with calls to clk_rate_exclusive_put()
4167         * by that same consumer.
4168         */
4169        if (WARN_ON(clk->exclusive_count)) {
4170                /* We voiced our concern, let's sanitize the situation */
4171                clk->core->protect_count -= (clk->exclusive_count - 1);
4172                clk_core_rate_unprotect(clk->core);
4173                clk->exclusive_count = 0;
4174        }
4175
4176        hlist_del(&clk->clks_node);
4177        if (clk->min_rate > clk->core->req_rate ||
4178            clk->max_rate < clk->core->req_rate)
4179                clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
4180
4181        owner = clk->core->owner;
4182        kref_put(&clk->core->ref, __clk_release);
4183
4184        clk_prepare_unlock();
4185
4186        module_put(owner);
4187
4188        free_clk(clk);
4189}
4190
4191/***        clk rate change notifiers        ***/
4192
4193/**
4194 * clk_notifier_register - add a clk rate change notifier
4195 * @clk: struct clk * to watch
4196 * @nb: struct notifier_block * with callback info
4197 *
4198 * Request notification when clk's rate changes.  This uses an SRCU
4199 * notifier because we want it to block and notifier unregistrations are
4200 * uncommon.  The callbacks associated with the notifier must not
4201 * re-enter into the clk framework by calling any top-level clk APIs;
4202 * this will cause a deadlock on the prepare_lock mutex.
4203 *
4204 * In all notification cases (pre, post and abort rate change) the original
4205 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4206 * and the new frequency is passed via struct clk_notifier_data.new_rate.
4207 *
4208 * clk_notifier_register() must be called from non-atomic context.
4209 * Returns -EINVAL if called with null arguments, -ENOMEM upon
4210 * allocation failure; otherwise, passes along the return value of
4211 * srcu_notifier_chain_register().
4212 */
4213int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4214{
4215        struct clk_notifier *cn;
4216        int ret = -ENOMEM;
4217
4218        if (!clk || !nb)
4219                return -EINVAL;
4220
4221        clk_prepare_lock();
4222
4223        /* search the list of notifiers for this clk */
4224        list_for_each_entry(cn, &clk_notifier_list, node)
4225                if (cn->clk == clk)
4226                        goto found;

4228        /* if clk wasn't in the notifier list, allocate new clk_notifier */
4229        cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4230        if (!cn)
4231                goto out;

4233        cn->clk = clk;
4234        srcu_init_notifier_head(&cn->notifier_head);

4236        list_add(&cn->node, &clk_notifier_list);

4238found:
4239        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

4241        clk->core->notifier_count++;
4243
4244out:
4245        clk_prepare_unlock();
4246
4247        return ret;
4248}
4249EXPORT_SYMBOL_GPL(clk_notifier_register);
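
/*
 * Editorial note: a hedged sketch of a rate change notifier (hypothetical
 * foo_* names). The callback runs with the prepare_lock held, so it must not
 * call back into top-level clk APIs:
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE &&
 *		    ndata->new_rate > FOO_MAX_SAFE_RATE)
 *			return NOTIFY_BAD;
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	clk_notifier_register(clk, &foo_nb);
 *
 * Returning NOTIFY_BAD from a PRE_RATE_CHANGE notification aborts the rate
 * change before the hardware is reprogrammed.
 */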
4250
4251/**
4252 * clk_notifier_unregister - remove a clk rate change notifier
4253 * @clk: struct clk *
4254 * @nb: struct notifier_block * with callback info
4255 *
4256 * Requests no further notification for changes to 'clk' and frees memory
4257 * allocated in clk_notifier_register().
4258 *
4259 * Returns -EINVAL if called with null arguments, -ENOENT if @nb was not
4260 * registered for @clk; otherwise, passes along the return value of
 * srcu_notifier_chain_unregister().
4261 */
4262int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4263{
4264        struct clk_notifier *cn;
4265        int ret = -ENOENT;

4267        if (!clk || !nb)
4268                return -EINVAL;

4270        clk_prepare_lock();

4272        list_for_each_entry(cn, &clk_notifier_list, node) {
4273                if (cn->clk == clk) {
4274                        ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

4276                        clk->core->notifier_count--;

4278                        /* XXX the notifier code should handle this better */
4279                        if (!cn->notifier_head.head) {
4280                                srcu_cleanup_notifier_head(&cn->notifier_head);
4281                                list_del(&cn->node);
4282                                kfree(cn);
4283                        }
4284                        break;
4285                }
4286        }
4291
4292        clk_prepare_unlock();
4293
4294        return ret;
4295}
4296EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4297
4298#ifdef CONFIG_OF
4299static void clk_core_reparent_orphans(void)
4300{
4301        clk_prepare_lock();
4302        clk_core_reparent_orphans_nolock();
4303        clk_prepare_unlock();
4304}
4305
4306/**
4307 * struct of_clk_provider - Clock provider registration structure
4308 * @link: Entry in global list of clock providers
4309 * @node: Pointer to device tree node of clock provider
4310 * @get: Get clock callback.  Returns NULL or a struct clk for the
4311 *       given clock specifier
 * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
 *       struct clk_hw for the given clock specifier
4312 * @data: context pointer to be passed into @get or @get_hw callback
4313 */
4314struct of_clk_provider {
4315        struct list_head link;
4316
4317        struct device_node *node;
4318        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4319        struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4320        void *data;
4321};
4322
4323extern struct of_device_id __clk_of_table;
4324static const struct of_device_id __clk_of_table_sentinel
4325        __used __section(__clk_of_table_end);
4326
4327static LIST_HEAD(of_clk_providers);
4328static DEFINE_MUTEX(of_clk_mutex);
4329
4330struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4331                                     void *data)
4332{
4333        return data;
4334}
4335EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4336
4337struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4338{
4339        return data;
4340}
4341EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4342
4343struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4344{
4345        struct clk_onecell_data *clk_data = data;
4346        unsigned int idx = clkspec->args[0];
4347
4348        if (idx >= clk_data->clk_num) {
4349                pr_err("%s: invalid clock index %u\n", __func__, idx);
4350                return ERR_PTR(-EINVAL);
4351        }
4352
4353        return clk_data->clks[idx];
4354}
4355EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4356
4357struct clk_hw *
4358of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4359{
4360        struct clk_hw_onecell_data *hw_data = data;
4361        unsigned int idx = clkspec->args[0];
4362
4363        if (idx >= hw_data->num) {
4364                pr_err("%s: invalid index %u\n", __func__, idx);
4365                return ERR_PTR(-EINVAL);
4366        }
4367
4368        return hw_data->hws[idx];
4369}
4370EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4371
4372/**
4373 * of_clk_add_provider() - Register a clock provider for a node
4374 * @np: Device node pointer associated with clock provider
4375 * @clk_src_get: callback for decoding clock
4376 * @data: context pointer for @clk_src_get callback.
4377 *
4378 * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
4379 */
4380int of_clk_add_provider(struct device_node *np,
4381                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
4382                                                   void *data),
4383                        void *data)
4384{
4385        struct of_clk_provider *cp;
4386        int ret;
4387
4388        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4389        if (!cp)
4390                return -ENOMEM;
4391
4392        cp->node = of_node_get(np);
4393        cp->data = data;
4394        cp->get = clk_src_get;
4395
4396        mutex_lock(&of_clk_mutex);
4397        list_add(&cp->link, &of_clk_providers);
4398        mutex_unlock(&of_clk_mutex);
4399        pr_debug("Added clock from %pOF\n", np);
4400
4401        clk_core_reparent_orphans();
4402
4403        ret = of_clk_set_defaults(np, true);
4404        if (ret < 0)
4405                of_clk_del_provider(np);
4406
4407        return ret;
4408}
4409EXPORT_SYMBOL_GPL(of_clk_add_provider);
4410
4411/**
4412 * of_clk_add_hw_provider() - Register a clock provider for a node
4413 * @np: Device node pointer associated with clock provider
4414 * @get: callback for decoding clk_hw
4415 * @data: context pointer for @get callback.
4416 */
4417int of_clk_add_hw_provider(struct device_node *np,
4418                           struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4419                                                 void *data),
4420                           void *data)
4421{
4422        struct of_clk_provider *cp;
4423        int ret;
4424
4425        cp = kzalloc(sizeof(*cp), GFP_KERNEL);
4426        if (!cp)
4427                return -ENOMEM;
4428
4429        cp->node = of_node_get(np);
4430        cp->data = data;
4431        cp->get_hw = get;
4432
4433        mutex_lock(&of_clk_mutex);
4434        list_add(&cp->link, &of_clk_providers);
4435        mutex_unlock(&of_clk_mutex);
4436        pr_debug("Added clk_hw provider from %pOF\n", np);
4437
4438        clk_core_reparent_orphans();
4439
4440        ret = of_clk_set_defaults(np, true);
4441        if (ret < 0)
4442                of_clk_del_provider(np);
4443
4444        return ret;
4445}
4446EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
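
/*
 * Editorial note: a hedged sketch of registering a multi-output provider
 * with of_clk_hw_onecell_get() (the FOO_* and foo_* names are hypothetical
 * and error handling is elided):
 *
 *	static struct clk_hw_onecell_data *foo_data;
 *
 *	foo_data = kzalloc(struct_size(foo_data, hws, FOO_NR_CLKS),
 *			   GFP_KERNEL);
 *	foo_data->num = FOO_NR_CLKS;
 *	foo_data->hws[FOO_PLL] = &foo_pll.hw;
 *	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, foo_data);
 *
 * DT consumers then select an output by index, e.g. clocks = <&foo FOO_PLL>.
 */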
4447
4448static void devm_of_clk_release_provider(struct device *dev, void *res)
4449{
4450        of_clk_del_provider(*(struct device_node **)res);
4451}
4452
4453/*
4454 * We allow a child device to use its parent device as the clock provider node
4455 * for cases like MFD sub-devices where the child device driver wants to use
4456 * devm_*() APIs but not list the device in DT as a sub-node.
4457 */
4458static struct device_node *get_clk_provider_node(struct device *dev)
4459{
4460        struct device_node *np, *parent_np;
4461
4462        np = dev->of_node;
4463        parent_np = dev->parent ? dev->parent->of_node : NULL;
4464
4465        if (!of_find_property(np, "#clock-cells", NULL))
4466                if (of_find_property(parent_np, "#clock-cells", NULL))
4467                        np = parent_np;
4468
4469        return np;
4470}
4471
4472/**
4473 * devm_of_clk_add_hw_provider() - Managed clk provider node registration
4474 * @dev: Device acting as the clock provider (used for DT node and lifetime)
4475 * @get: callback for decoding clk_hw
4476 * @data: context pointer for @get callback
4477 *
4478 * Registers a clock provider for the given device's node. If the device has
4479 * no DT node, or if its node lacks clock provider information (#clock-cells),
4480 * then the parent device's node is scanned for this information. If the
4481 * parent node has #clock-cells, it is used for the registration. The
4482 * provider is automatically released at device exit.
4483 *
4484 * Return: 0 on success or an errno on failure.
4485 */
4486int devm_of_clk_add_hw_provider(struct device *dev,
4487                        struct clk_hw *(*get)(struct of_phandle_args *clkspec,
4488                                              void *data),
4489                        void *data)
4490{
4491        struct device_node **ptr, *np;
4492        int ret;
4493
4494        ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
4495                           GFP_KERNEL);
4496        if (!ptr)
4497                return -ENOMEM;
4498
4499        np = get_clk_provider_node(dev);
4500        ret = of_clk_add_hw_provider(np, get, data);
4501        if (!ret) {
4502                *ptr = np;
4503                devres_add(dev, ptr);
4504        } else {
4505                devres_free(ptr);
4506        }
4507
4508        return ret;
4509}
4510EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
4511
4512/**
4513 * of_clk_del_provider() - Remove a previously registered clock provider
4514 * @np: Device node pointer associated with clock provider
4515 */
4516void of_clk_del_provider(struct device_node *np)
4517{
4518        struct of_clk_provider *cp;
4519
4520        mutex_lock(&of_clk_mutex);
4521        list_for_each_entry(cp, &of_clk_providers, link) {
4522                if (cp->node == np) {
4523                        list_del(&cp->link);
4524                        of_node_put(cp->node);
4525                        kfree(cp);
4526                        break;
4527                }
4528        }
4529        mutex_unlock(&of_clk_mutex);
4530}
4531EXPORT_SYMBOL_GPL(of_clk_del_provider);
4532
4533static int devm_clk_provider_match(struct device *dev, void *res, void *data)
4534{
4535        struct device_node **np = res;
4536
4537        if (WARN_ON(!np || !*np))
4538                return 0;
4539
4540        return *np == data;
4541}
4542
4543/**
4544 * devm_of_clk_del_provider() - Remove clock provider registered using devm
4545 * @dev: Device to whose lifetime the clock provider was bound
4546 */
4547void devm_of_clk_del_provider(struct device *dev)
4548{
4549        int ret;
4550        struct device_node *np = get_clk_provider_node(dev);
4551
4552        ret = devres_release(dev, devm_of_clk_release_provider,
4553                             devm_clk_provider_match, np);
4554
4555        WARN_ON(ret);
4556}
4557EXPORT_SYMBOL(devm_of_clk_del_provider);
4558
4559/**
4560 * of_parse_clkspec() - Parse a DT clock specifier for a given device node
4561 * @np: device node to parse clock specifier from
4562 * @index: index of phandle to parse clock out of. If index < 0, @name is used
4563 * @name: clock name to find and parse. If name is NULL, the index is used
4564 * @out_args: Result of parsing the clock specifier
4565 *
4566 * Parses a device node's "clocks" and "clock-names" properties to find the
4567 * phandle and cells for the index or name that is desired. The resulting clock
4568 * specifier is placed into @out_args, or an errno is returned when there's a
4569 * parsing error. The @index argument is ignored if @name is non-NULL.
4570 *
4571 * Example:
4572 *
4573 * phandle1: clock-controller@1 {
4574 *      #clock-cells = <2>;
4575 * }
4576 *
4577 * phandle2: clock-controller@2 {
4578 *      #clock-cells = <1>;
4579 * }
4580 *
4581 * clock-consumer@3 {
4582 *      clocks = <&phandle1 1 2 &phandle2 3>;
4583 *      clock-names = "name1", "name2";
4584 * }
4585 *
4586 * To get a device_node for `clock-controller@2' node you may call this
4587 * function a few different ways:
4588 *
4589 *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
4590 *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
4591 *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
4592 *
4593 * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
4594 * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
4595 * the "clock-names" property of @np.
4596 */
4597static int of_parse_clkspec(const struct device_node *np, int index,
4598                            const char *name, struct of_phandle_args *out_args)
4599{
4600        int ret = -ENOENT;
4601
4602        /* Walk up the tree of devices looking for a clock property that matches */
4603        while (np) {
4604                /*
4605                 * For named clocks, first look up the name in the
4606                 * "clock-names" property.  If it cannot be found, then index
4607                 * will be an error code and of_parse_phandle_with_args() will
4608                 * return -EINVAL.
4609                 */
4610                if (name)
4611                        index = of_property_match_string(np, "clock-names", name);
4612                ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
4613                                                 index, out_args);
4614                if (!ret)
4615                        break;
4616                if (name && index >= 0)
4617                        break;
4618
4619                /*
4620                 * No matching clock found on this node.  If the parent node
4621                 * has a "clock-ranges" property, then we can try one of its
4622                 * clocks.
4623                 */
4624                np = np->parent;
4625                if (np && !of_get_property(np, "clock-ranges", NULL))
4626                        break;
4627                index = 0;
4628        }
4629
4630        return ret;
4631}
4632
4633static struct clk_hw *
4634__of_clk_get_hw_from_provider(struct of_clk_provider *provider,
4635                              struct of_phandle_args *clkspec)
4636{
4637        struct clk *clk;
4638
4639        if (provider->get_hw)
4640                return provider->get_hw(clkspec, provider->data);
4641
4642        clk = provider->get(clkspec, provider->data);
4643        if (IS_ERR(clk))
4644                return ERR_CAST(clk);
4645        return __clk_get_hw(clk);
4646}
4647
4648static struct clk_hw *
4649of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
4650{
4651        struct of_clk_provider *provider;
4652        struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
4653
4654        if (!clkspec)
4655                return ERR_PTR(-EINVAL);
4656
4657        mutex_lock(&of_clk_mutex);
4658        list_for_each_entry(provider, &of_clk_providers, link) {
4659                if (provider->node == clkspec->np) {
4660                        hw = __of_clk_get_hw_from_provider(provider, clkspec);
4661                        if (!IS_ERR(hw))
4662                                break;
4663                }
4664        }
4665        mutex_unlock(&of_clk_mutex);
4666
4667        return hw;
4668}
4669
4670/**
4671 * of_clk_get_from_provider() - Lookup a clock from a clock provider
4672 * @clkspec: pointer to a clock specifier data structure
4673 *
4674 * This function looks up a struct clk from the registered list of clock
4675 * providers. The input is a clock specifier data structure as returned
4676 * from the of_parse_phandle_with_args() function call.
4677 */
4678struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
4679{
4680        struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
4681
4682        return clk_hw_create_clk(NULL, hw, NULL, __func__);
4683}
4684EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
4685
4686struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
4687                             const char *con_id)
4688{
4689        int ret;
4690        struct clk_hw *hw;
4691        struct of_phandle_args clkspec;
4692
4693        ret = of_parse_clkspec(np, index, con_id, &clkspec);
4694        if (ret)
4695                return ERR_PTR(ret);
4696
4697        hw = of_clk_get_hw_from_clkspec(&clkspec);
4698        of_node_put(clkspec.np);
4699
4700        return hw;
4701}
4702
4703static struct clk *__of_clk_get(struct device_node *np,
4704                                int index, const char *dev_id,
4705                                const char *con_id)
4706{
4707        struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
4708
4709        return clk_hw_create_clk(NULL, hw, dev_id, con_id);
4710}
4711
4712struct clk *of_clk_get(struct device_node *np, int index)
4713{
4714        return __of_clk_get(np, index, np->full_name, NULL);
4715}
4716EXPORT_SYMBOL(of_clk_get);
4717
4718/**
4719 * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
4720 * @np: pointer to clock consumer node
4721 * @name: name of consumer's clock input, or NULL for the first clock reference
4722 *
4723 * This function parses the clocks and clock-names properties,
4724 * and uses them to look up the struct clk from the registered list of clock
4725 * providers.
4726 */
4727struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
4728{
4729        if (!np)
4730                return ERR_PTR(-ENOENT);
4731
4732        return __of_clk_get(np, 0, np->full_name, name);
4733}
4734EXPORT_SYMBOL(of_clk_get_by_name);
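
/*
 * Editorial note: reusing the clock-consumer@3 binding shown above in
 * of_parse_clkspec(), a hedged consumer lookup would be:
 *
 *	struct clk *clk = of_clk_get_by_name(np, "name2");
 *
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *
 * This resolves through phandle2 and must be balanced with a clk_put() when
 * the consumer is done with the clock.
 */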
4735
4736/**
4737 * of_clk_get_parent_count() - Count the number of clocks a device node has
4738 * @np: device node to count
4739 *
4740 * Returns: The number of clocks that are possible parents of this node
4741 */
4742unsigned int of_clk_get_parent_count(const struct device_node *np)
4743{
4744        int count;
4745
4746        count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
4747        if (count < 0)
4748                return 0;
4749
4750        return count;
4751}
4752EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
4753
4754const char *of_clk_get_parent_name(const struct device_node *np, int index)
4755{
4756        struct of_phandle_args clkspec;
4757        struct property *prop;
4758        const char *clk_name;
4759        const __be32 *vp;
4760        u32 pv;
4761        int rc;
4762        int count;
4763        struct clk *clk;
4764
4765        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
4766                                        &clkspec);
4767        if (rc)
4768                return NULL;
4769
4770        index = clkspec.args_count ? clkspec.args[0] : 0;
4771        count = 0;
4772
4773        /* if there is a "clock-indices" property, use it to transfer the index
4774         * specified into an array offset for the "clock-output-names" property.
4775         */
4776        of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
4777                if (index == pv) {
4778                        index = count;
4779                        break;
4780                }
4781                count++;
4782        }
4783        /* We went off the end of 'clock-indices' without finding it */
4784        if (prop && !vp)
4785                return NULL;
4786
4787        if (of_property_read_string_index(clkspec.np, "clock-output-names",
4788                                          index,
4789                                          &clk_name) < 0) {
4790                /*
4791                 * Best effort to get the name if the clock has been
4792                 * registered with the framework. If the clock isn't
4793                 * registered, we return the node name as the name of
4794                 * the clock as long as #clock-cells = 0.
4795                 */
4796                clk = of_clk_get_from_provider(&clkspec);
4797                if (IS_ERR(clk)) {
4798                        if (clkspec.args_count == 0)
4799                                clk_name = clkspec.np->name;
4800                        else
4801                                clk_name = NULL;
4802                } else {
4803                        clk_name = __clk_get_name(clk);
4804                        clk_put(clk);
4805                }
4806        }
4807
4809        of_node_put(clkspec.np);
4810        return clk_name;
4811}
4812EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
4813
4814/**
4815 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
4816 * number of parents
4817 * @np: Device node pointer associated with clock provider
4818 * @parents: pointer to char array that holds the parents' names
4819 * @size: size of the @parents array
4820 *
4821 * Return: number of parents for the clock node.
4822 */
4823int of_clk_parent_fill(struct device_node *np, const char **parents,
4824                       unsigned int size)
4825{
4826        unsigned int i = 0;
4827
4828        while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
4829                i++;
4830
4831        return i;
4832}
4833EXPORT_SYMBOL_GPL(of_clk_parent_fill);
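
/*
 * Editorial note: a hedged usage sketch (hypothetical sizes and names); the
 * @parents array must be sized for the maximum number of parents expected:
 *
 *	const char *parents[2];
 *	int num_parents = of_clk_parent_fill(np, parents, ARRAY_SIZE(parents));
 *
 * The result is typically fed into clk_init_data as .parent_names = parents
 * and .num_parents = num_parents.
 */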
4834
4835struct clock_provider {
4836        void (*clk_init_cb)(struct device_node *);
4837        struct device_node *np;
4838        struct list_head node;
4839};
4840
4841/*
4842 * This function looks for a parent clock. If there is one, it checks
4843 * that the provider for this parent clock was initialized, in which
4844 * case the parent clock will be ready.
4845 */
4846static int parent_ready(struct device_node *np)
4847{
4848        int i = 0;
4849
4850        while (true) {
4851                struct clk *clk = of_clk_get(np, i);
4852
4853                /* this parent is ready, we can check the next one */
4854                if (!IS_ERR(clk)) {
4855                        clk_put(clk);
4856                        i++;
4857                        continue;
4858                }
4859
4860                /* at least one parent is not ready, we exit now */
4861                if (PTR_ERR(clk) == -EPROBE_DEFER)
4862                        return 0;
4863
4864                /*
4865                 * Here we assume that the device tree is written
4866                 * correctly, so an error means that there are no more
4867                 * parents. As we didn't exit yet, the previous parents
4868                 * are ready. If there is no clock parent at all, there
4869                 * is nothing to wait for, so we can consider the absence
4870                 * of parents as being ready.
4871                 */
4872                return 1;
4873        }
4874}
4875
4876/**
4877 * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
4878 * @np: Device node pointer associated with clock provider
4879 * @index: clock index
4880 * @flags: pointer to top-level framework flags
4881 *
4882 * Detects if the clock-critical property exists and, if so, sets the
4883 * corresponding CLK_IS_CRITICAL flag.
4884 *
4885 * Do not use this function. It exists only for legacy Device Tree
4886 * bindings, such as the outdated one-clock-per-node style.
4887 * Those bindings typically put all clock data into .dts and the Linux
4888 * driver has no clock data, thus making it impossible to set this flag
4889 * correctly from the driver. Only those drivers may call
4890 * of_clk_detect_critical from their setup functions.
4891 *
4892 * Return: error code or zero on success
4893 */
4894int of_clk_detect_critical(struct device_node *np, int index,
4895                           unsigned long *flags)
4896{
4897        struct property *prop;
4898        const __be32 *cur;
4899        uint32_t idx;
4900
4901        if (!np || !flags)
4902                return -EINVAL;
4903
4904        of_property_for_each_u32(np, "clock-critical", prop, cur, idx)
4905                if (index == idx)
4906                        *flags |= CLK_IS_CRITICAL;
4907
4908        return 0;
4909}
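
/*
 * Editorial note: with such a legacy binding, the property looks like this
 * hedged device tree sketch, which marks output 0 of the provider as
 * critical ("foo,clock-controller" is a hypothetical compatible):
 *
 *	clock-controller@f00 {
 *		compatible = "foo,clock-controller";
 *		#clock-cells = <1>;
 *		clock-critical = <0>;
 *	};
 */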
4910
4911/**
4912 * of_clk_init() - Scan and init clock providers from the DT
4913 * @matches: array of compatible values and init functions for providers.
4914 *
4915 * This function scans the device tree for matching clock providers
4916 * and calls their initialization functions, trying to follow the
4917 * dependency order between providers.
4918 */
4919void __init of_clk_init(const struct of_device_id *matches)
4920{
4921        const struct of_device_id *match;
4922        struct device_node *np;
4923        struct clock_provider *clk_provider, *next;
4924        bool is_init_done;
4925        bool force = false;
4926        LIST_HEAD(clk_provider_list);
4927
4928        if (!matches)
4929                matches = &__clk_of_table;
4930
4931        /* First prepare the list of the clock providers */
4932        for_each_matching_node_and_match(np, matches, &match) {
4933                struct clock_provider *parent;
4934
4935                if (!of_device_is_available(np))
4936                        continue;
4937
4938                parent = kzalloc(sizeof(*parent), GFP_KERNEL);
4939                if (!parent) {
4940                        list_for_each_entry_safe(clk_provider, next,
4941                                                 &clk_provider_list, node) {
4942                                list_del(&clk_provider->node);
4943                                of_node_put(clk_provider->np);
4944                                kfree(clk_provider);
4945                        }
4946                        of_node_put(np);
4947                        return;
4948                }
4949
4950                parent->clk_init_cb = match->data;
4951                parent->np = of_node_get(np);
4952                list_add_tail(&parent->node, &clk_provider_list);
4953        }
4954
4955        while (!list_empty(&clk_provider_list)) {
4956                is_init_done = false;
4957                list_for_each_entry_safe(clk_provider, next,
4958                                        &clk_provider_list, node) {
4959                        if (force || parent_ready(clk_provider->np)) {
4960
4961                                /* Don't populate platform devices */
4962                                of_node_set_flag(clk_provider->np,
4963                                                 OF_POPULATED);
4964
4965                                clk_provider->clk_init_cb(clk_provider->np);
4966                                of_clk_set_defaults(clk_provider->np, true);
4967
4968                                list_del(&clk_provider->node);
4969                                of_node_put(clk_provider->np);
4970                                kfree(clk_provider);
4971                                is_init_done = true;
4972                        }
4973                }
4974
4975                /*
4976                 * We didn't manage to initialize any of the
4977                 * remaining providers during the last loop, so now we
4978                 * initialize all the remaining ones unconditionally
4979                 * in case the clock parent was not mandatory
4980                 */
4981                if (!is_init_done)
4982                        force = true;
4983        }
4984}
4985#endif
4986