linux/drivers/clk/clk.c
   1/*
   2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
   3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
   4 *
   5 * This program is free software; you can redistribute it and/or modify
   6 * it under the terms of the GNU General Public License version 2 as
   7 * published by the Free Software Foundation.
   8 *
   9 * Standard functionality for the common clock API.  See Documentation/clk.txt
  10 */
  11
  12#include <linux/clk.h>
  13#include <linux/clk-provider.h>
  14#include <linux/clk/clk-conf.h>
  15#include <linux/module.h>
  16#include <linux/mutex.h>
  17#include <linux/spinlock.h>
  18#include <linux/err.h>
  19#include <linux/list.h>
  20#include <linux/slab.h>
  21#include <linux/of.h>
  22#include <linux/device.h>
  23#include <linux/init.h>
  24#include <linux/sched.h>
  25#include <linux/clkdev.h>
  26
  27#include "clk.h"
  28
  29static DEFINE_SPINLOCK(enable_lock);
  30static DEFINE_MUTEX(prepare_lock);
  31
  32static struct task_struct *prepare_owner;
  33static struct task_struct *enable_owner;
  34
  35static int prepare_refcnt;
  36static int enable_refcnt;
  37
  38static HLIST_HEAD(clk_root_list);
  39static HLIST_HEAD(clk_orphan_list);
  40static LIST_HEAD(clk_notifier_list);
  41
  42/***    private data structures    ***/
  43
  44struct clk_core {
  45        const char              *name;
  46        const struct clk_ops    *ops;
  47        struct clk_hw           *hw;
  48        struct module           *owner;
  49        struct clk_core         *parent;
  50        const char              **parent_names;
  51        struct clk_core         **parents;
  52        u8                      num_parents;
  53        u8                      new_parent_index;
  54        unsigned long           rate;
  55        unsigned long           req_rate;
  56        unsigned long           new_rate;
  57        struct clk_core         *new_parent;
  58        struct clk_core         *new_child;
  59        unsigned long           flags;
  60        bool                    orphan;
  61        unsigned int            enable_count;
  62        unsigned int            prepare_count;
  63        unsigned long           min_rate;
  64        unsigned long           max_rate;
  65        unsigned long           accuracy;
  66        int                     phase;
  67        struct hlist_head       children;
  68        struct hlist_node       child_node;
  69        struct hlist_head       clks;
  70        unsigned int            notifier_count;
  71#ifdef CONFIG_DEBUG_FS
  72        struct dentry           *dentry;
  73        struct hlist_node       debug_node;
  74#endif
  75        struct kref             ref;
  76};
  77
  78#define CREATE_TRACE_POINTS
  79#include <trace/events/clk.h>
  80
  81struct clk {
  82        struct clk_core *core;
  83        const char *dev_id;
  84        const char *con_id;
  85        unsigned long min_rate;
  86        unsigned long max_rate;
  87        struct hlist_node clks_node;
  88};
  89
  90/***           locking             ***/
  91static void clk_prepare_lock(void)
  92{
  93        if (!mutex_trylock(&prepare_lock)) {
  94                if (prepare_owner == current) {
  95                        prepare_refcnt++;
  96                        return;
  97                }
  98                mutex_lock(&prepare_lock);
  99        }
 100        WARN_ON_ONCE(prepare_owner != NULL);
 101        WARN_ON_ONCE(prepare_refcnt != 0);
 102        prepare_owner = current;
 103        prepare_refcnt = 1;
 104}
 105
 106static void clk_prepare_unlock(void)
 107{
 108        WARN_ON_ONCE(prepare_owner != current);
 109        WARN_ON_ONCE(prepare_refcnt == 0);
 110
 111        if (--prepare_refcnt)
 112                return;
 113        prepare_owner = NULL;
 114        mutex_unlock(&prepare_lock);
 115}
 116
 117static unsigned long clk_enable_lock(void)
 118        __acquires(enable_lock)
 119{
 120        unsigned long flags;
 121
 122        if (!spin_trylock_irqsave(&enable_lock, flags)) {
 123                if (enable_owner == current) {
 124                        enable_refcnt++;
 125                        __acquire(enable_lock);
 126                        return flags;
 127                }
 128                spin_lock_irqsave(&enable_lock, flags);
 129        }
 130        WARN_ON_ONCE(enable_owner != NULL);
 131        WARN_ON_ONCE(enable_refcnt != 0);
 132        enable_owner = current;
 133        enable_refcnt = 1;
 134        return flags;
 135}
 136
 137static void clk_enable_unlock(unsigned long flags)
 138        __releases(enable_lock)
 139{
 140        WARN_ON_ONCE(enable_owner != current);
 141        WARN_ON_ONCE(enable_refcnt == 0);
 142
 143        if (--enable_refcnt) {
 144                __release(enable_lock);
 145                return;
 146        }
 147        enable_owner = NULL;
 148        spin_unlock_irqrestore(&enable_lock, flags);
 149}
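
/*
 * Illustrative sketch (hypothetical provider code, not part of this
 * file) of why both locks are reentrant: a callback running under
 * prepare_lock may legitimately call back into the framework, e.g. a
 * PLL whose .prepare op must first prepare an external reference clock.
 * The struct and names below are invented for the example.
 */
struct example_pll {
        struct clk_hw hw;
        struct clk *refclk;
};

static int example_pll_prepare(struct clk_hw *hw)
{
        struct example_pll *pll = container_of(hw, struct example_pll, hw);

        /* re-enters clk_prepare_lock(); prepare_refcnt rises to 2 */
        return clk_prepare(pll->refclk);
}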
 150
 151static bool clk_core_is_prepared(struct clk_core *core)
 152{
  153        /*
  154         * .is_prepared is optional for clocks that can prepare;
  155         * fall back to the software usage counter if it is missing.
  156         */
 157        if (!core->ops->is_prepared)
 158                return core->prepare_count;
 159
 160        return core->ops->is_prepared(core->hw);
 161}
 162
 163static bool clk_core_is_enabled(struct clk_core *core)
 164{
  165        /*
  166         * .is_enabled is mandatory only for clocks that gate;
  167         * fall back to the software usage counter if it is missing.
  168         */
 169        if (!core->ops->is_enabled)
 170                return core->enable_count;
 171
 172        return core->ops->is_enabled(core->hw);
 173}
 174
 175static void clk_unprepare_unused_subtree(struct clk_core *core)
 176{
 177        struct clk_core *child;
 178
 179        lockdep_assert_held(&prepare_lock);
 180
 181        hlist_for_each_entry(child, &core->children, child_node)
 182                clk_unprepare_unused_subtree(child);
 183
 184        if (core->prepare_count)
 185                return;
 186
 187        if (core->flags & CLK_IGNORE_UNUSED)
 188                return;
 189
 190        if (clk_core_is_prepared(core)) {
 191                trace_clk_unprepare(core);
 192                if (core->ops->unprepare_unused)
 193                        core->ops->unprepare_unused(core->hw);
 194                else if (core->ops->unprepare)
 195                        core->ops->unprepare(core->hw);
 196                trace_clk_unprepare_complete(core);
 197        }
 198}
 199
 200static void clk_disable_unused_subtree(struct clk_core *core)
 201{
 202        struct clk_core *child;
 203        unsigned long flags;
 204
 205        lockdep_assert_held(&prepare_lock);
 206
 207        hlist_for_each_entry(child, &core->children, child_node)
 208                clk_disable_unused_subtree(child);
 209
 210        flags = clk_enable_lock();
 211
 212        if (core->enable_count)
 213                goto unlock_out;
 214
 215        if (core->flags & CLK_IGNORE_UNUSED)
 216                goto unlock_out;
 217
 218        /*
 219         * some gate clocks have special needs during the disable-unused
 220         * sequence.  call .disable_unused if available, otherwise fall
 221         * back to .disable
 222         */
 223        if (clk_core_is_enabled(core)) {
 224                trace_clk_disable(core);
 225                if (core->ops->disable_unused)
 226                        core->ops->disable_unused(core->hw);
 227                else if (core->ops->disable)
 228                        core->ops->disable(core->hw);
 229                trace_clk_disable_complete(core);
 230        }
 231
 232unlock_out:
 233        clk_enable_unlock(flags);
 234}
 235
 236static bool clk_ignore_unused;
 237static int __init clk_ignore_unused_setup(char *__unused)
 238{
 239        clk_ignore_unused = true;
 240        return 1;
 241}
 242__setup("clk_ignore_unused", clk_ignore_unused_setup);
 243
 244static int clk_disable_unused(void)
 245{
 246        struct clk_core *core;
 247
 248        if (clk_ignore_unused) {
 249                pr_warn("clk: Not disabling unused clocks\n");
 250                return 0;
 251        }
 252
 253        clk_prepare_lock();
 254
 255        hlist_for_each_entry(core, &clk_root_list, child_node)
 256                clk_disable_unused_subtree(core);
 257
 258        hlist_for_each_entry(core, &clk_orphan_list, child_node)
 259                clk_disable_unused_subtree(core);
 260
 261        hlist_for_each_entry(core, &clk_root_list, child_node)
 262                clk_unprepare_unused_subtree(core);
 263
 264        hlist_for_each_entry(core, &clk_orphan_list, child_node)
 265                clk_unprepare_unused_subtree(core);
 266
 267        clk_prepare_unlock();
 268
 269        return 0;
 270}
 271late_initcall_sync(clk_disable_unused);
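
/*
 * Note: this late initcall gates every clock that is still prepared or
 * enabled in hardware but has no software users.  Booting with
 * "clk_ignore_unused" on the kernel command line (see the __setup()
 * hook above) skips the pass, which can help when debugging a driver
 * that still relies on bootloader-enabled clocks.
 */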
 272
 273/***    helper functions   ***/
 274
 275const char *__clk_get_name(const struct clk *clk)
 276{
 277        return !clk ? NULL : clk->core->name;
 278}
 279EXPORT_SYMBOL_GPL(__clk_get_name);
 280
 281const char *clk_hw_get_name(const struct clk_hw *hw)
 282{
 283        return hw->core->name;
 284}
 285EXPORT_SYMBOL_GPL(clk_hw_get_name);
 286
 287struct clk_hw *__clk_get_hw(struct clk *clk)
 288{
 289        return !clk ? NULL : clk->core->hw;
 290}
 291EXPORT_SYMBOL_GPL(__clk_get_hw);
 292
 293unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
 294{
 295        return hw->core->num_parents;
 296}
 297EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
 298
 299struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
 300{
 301        return hw->core->parent ? hw->core->parent->hw : NULL;
 302}
 303EXPORT_SYMBOL_GPL(clk_hw_get_parent);
 304
 305static struct clk_core *__clk_lookup_subtree(const char *name,
 306                                             struct clk_core *core)
 307{
 308        struct clk_core *child;
 309        struct clk_core *ret;
 310
 311        if (!strcmp(core->name, name))
 312                return core;
 313
 314        hlist_for_each_entry(child, &core->children, child_node) {
 315                ret = __clk_lookup_subtree(name, child);
 316                if (ret)
 317                        return ret;
 318        }
 319
 320        return NULL;
 321}
 322
 323static struct clk_core *clk_core_lookup(const char *name)
 324{
 325        struct clk_core *root_clk;
 326        struct clk_core *ret;
 327
 328        if (!name)
 329                return NULL;
 330
 331        /* search the 'proper' clk tree first */
 332        hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
 333                ret = __clk_lookup_subtree(name, root_clk);
 334                if (ret)
 335                        return ret;
 336        }
 337
 338        /* if not found, then search the orphan tree */
 339        hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
 340                ret = __clk_lookup_subtree(name, root_clk);
 341                if (ret)
 342                        return ret;
 343        }
 344
 345        return NULL;
 346}
 347
 348static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
 349                                                         u8 index)
 350{
 351        if (!core || index >= core->num_parents)
 352                return NULL;
 353
 354        if (!core->parents[index])
 355                core->parents[index] =
 356                                clk_core_lookup(core->parent_names[index]);
 357
 358        return core->parents[index];
 359}
 360
 361struct clk_hw *
 362clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
 363{
 364        struct clk_core *parent;
 365
 366        parent = clk_core_get_parent_by_index(hw->core, index);
 367
 368        return !parent ? NULL : parent->hw;
 369}
 370EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
 371
 372unsigned int __clk_get_enable_count(struct clk *clk)
 373{
 374        return !clk ? 0 : clk->core->enable_count;
 375}
 376
 377static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
 378{
 379        unsigned long ret;
 380
 381        if (!core) {
 382                ret = 0;
 383                goto out;
 384        }
 385
 386        ret = core->rate;
 387
 388        if (!core->num_parents)
 389                goto out;
 390
 391        if (!core->parent)
 392                ret = 0;
 393
 394out:
 395        return ret;
 396}
 397
 398unsigned long clk_hw_get_rate(const struct clk_hw *hw)
 399{
 400        return clk_core_get_rate_nolock(hw->core);
 401}
 402EXPORT_SYMBOL_GPL(clk_hw_get_rate);
 403
 404static unsigned long __clk_get_accuracy(struct clk_core *core)
 405{
 406        if (!core)
 407                return 0;
 408
 409        return core->accuracy;
 410}
 411
 412unsigned long __clk_get_flags(struct clk *clk)
 413{
 414        return !clk ? 0 : clk->core->flags;
 415}
 416EXPORT_SYMBOL_GPL(__clk_get_flags);
 417
 418unsigned long clk_hw_get_flags(const struct clk_hw *hw)
 419{
 420        return hw->core->flags;
 421}
 422EXPORT_SYMBOL_GPL(clk_hw_get_flags);
 423
 424bool clk_hw_is_prepared(const struct clk_hw *hw)
 425{
 426        return clk_core_is_prepared(hw->core);
 427}
 428
 429bool clk_hw_is_enabled(const struct clk_hw *hw)
 430{
 431        return clk_core_is_enabled(hw->core);
 432}
 433
 434bool __clk_is_enabled(struct clk *clk)
 435{
 436        if (!clk)
 437                return false;
 438
 439        return clk_core_is_enabled(clk->core);
 440}
 441EXPORT_SYMBOL_GPL(__clk_is_enabled);
 442
 443static bool mux_is_better_rate(unsigned long rate, unsigned long now,
 444                           unsigned long best, unsigned long flags)
 445{
 446        if (flags & CLK_MUX_ROUND_CLOSEST)
 447                return abs(now - rate) < abs(best - rate);
 448
 449        return now <= rate && now > best;
 450}
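
/*
 * Worked example: for a requested rate of 100 MHz and candidate parent
 * rates of 95 MHz and 104 MHz, the default policy above picks 95 MHz
 * (the best rate that does not exceed the request), while
 * CLK_MUX_ROUND_CLOSEST picks 104 MHz (the smallest absolute error).
 */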
 451
 452static int
 453clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
 454                             unsigned long flags)
 455{
 456        struct clk_core *core = hw->core, *parent, *best_parent = NULL;
 457        int i, num_parents, ret;
 458        unsigned long best = 0;
 459        struct clk_rate_request parent_req = *req;
 460
 461        /* if NO_REPARENT flag set, pass through to current parent */
 462        if (core->flags & CLK_SET_RATE_NO_REPARENT) {
 463                parent = core->parent;
 464                if (core->flags & CLK_SET_RATE_PARENT) {
 465                        ret = __clk_determine_rate(parent ? parent->hw : NULL,
 466                                                   &parent_req);
 467                        if (ret)
 468                                return ret;
 469
 470                        best = parent_req.rate;
 471                } else if (parent) {
 472                        best = clk_core_get_rate_nolock(parent);
 473                } else {
 474                        best = clk_core_get_rate_nolock(core);
 475                }
 476
 477                goto out;
 478        }
 479
 480        /* find the parent that can provide the fastest rate <= rate */
 481        num_parents = core->num_parents;
 482        for (i = 0; i < num_parents; i++) {
 483                parent = clk_core_get_parent_by_index(core, i);
 484                if (!parent)
 485                        continue;
 486
 487                if (core->flags & CLK_SET_RATE_PARENT) {
 488                        parent_req = *req;
 489                        ret = __clk_determine_rate(parent->hw, &parent_req);
 490                        if (ret)
 491                                continue;
 492                } else {
 493                        parent_req.rate = clk_core_get_rate_nolock(parent);
 494                }
 495
 496                if (mux_is_better_rate(req->rate, parent_req.rate,
 497                                       best, flags)) {
 498                        best_parent = parent;
 499                        best = parent_req.rate;
 500                }
 501        }
 502
 503        if (!best_parent)
 504                return -EINVAL;
 505
 506out:
 507        if (best_parent)
 508                req->best_parent_hw = best_parent->hw;
 509        req->best_parent_rate = best;
 510        req->rate = best;
 511
 512        return 0;
 513}
 514
 515struct clk *__clk_lookup(const char *name)
 516{
 517        struct clk_core *core = clk_core_lookup(name);
 518
 519        return !core ? NULL : core->hw->clk;
 520}
 521
 522static void clk_core_get_boundaries(struct clk_core *core,
 523                                    unsigned long *min_rate,
 524                                    unsigned long *max_rate)
 525{
 526        struct clk *clk_user;
 527
 528        *min_rate = core->min_rate;
 529        *max_rate = core->max_rate;
 530
 531        hlist_for_each_entry(clk_user, &core->clks, clks_node)
 532                *min_rate = max(*min_rate, clk_user->min_rate);
 533
 534        hlist_for_each_entry(clk_user, &core->clks, clks_node)
 535                *max_rate = min(*max_rate, clk_user->max_rate);
 536}
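
/*
 * Worked example: if the hardware boundaries are [0, 400 MHz] and two
 * consumers have requested [100 MHz, ULONG_MAX] and [0, 200 MHz], the
 * aggregated boundaries returned here are [100 MHz, 200 MHz].
 */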
 537
 538void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
 539                           unsigned long max_rate)
 540{
 541        hw->core->min_rate = min_rate;
 542        hw->core->max_rate = max_rate;
 543}
 544EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
 545
 546/*
 547 * Helper for finding best parent to provide a given frequency. This can be used
 548 * directly as a determine_rate callback (e.g. for a mux), or from a more
 549 * complex clock that may combine a mux with other operations.
 550 */
 551int __clk_mux_determine_rate(struct clk_hw *hw,
 552                             struct clk_rate_request *req)
 553{
 554        return clk_mux_determine_rate_flags(hw, req, 0);
 555}
 556EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 557
 558int __clk_mux_determine_rate_closest(struct clk_hw *hw,
 559                                     struct clk_rate_request *req)
 560{
 561        return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
 562}
 563EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 564
 565/***        clk api        ***/
 566
 567static void clk_core_unprepare(struct clk_core *core)
 568{
 569        lockdep_assert_held(&prepare_lock);
 570
 571        if (!core)
 572                return;
 573
 574        if (WARN_ON(core->prepare_count == 0))
 575                return;
 576
 577        if (--core->prepare_count > 0)
 578                return;
 579
 580        WARN_ON(core->enable_count > 0);
 581
 582        trace_clk_unprepare(core);
 583
 584        if (core->ops->unprepare)
 585                core->ops->unprepare(core->hw);
 586
 587        trace_clk_unprepare_complete(core);
 588        clk_core_unprepare(core->parent);
 589}
 590
 591/**
 592 * clk_unprepare - undo preparation of a clock source
 593 * @clk: the clk being unprepared
 594 *
 595 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 596 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 597 * if the operation may sleep.  One example is a clk which is accessed over
  598 * I2C.  In the complex case a clk gate operation may require a fast and a slow
  599 * part.  It is for this reason that clk_unprepare and clk_disable are not
  600 * mutually exclusive.  In fact clk_disable must be called before clk_unprepare.
 601 */
 602void clk_unprepare(struct clk *clk)
 603{
 604        if (IS_ERR_OR_NULL(clk))
 605                return;
 606
 607        clk_prepare_lock();
 608        clk_core_unprepare(clk->core);
 609        clk_prepare_unlock();
 610}
 611EXPORT_SYMBOL_GPL(clk_unprepare);
 612
 613static int clk_core_prepare(struct clk_core *core)
 614{
 615        int ret = 0;
 616
 617        lockdep_assert_held(&prepare_lock);
 618
 619        if (!core)
 620                return 0;
 621
 622        if (core->prepare_count == 0) {
 623                ret = clk_core_prepare(core->parent);
 624                if (ret)
 625                        return ret;
 626
 627                trace_clk_prepare(core);
 628
 629                if (core->ops->prepare)
 630                        ret = core->ops->prepare(core->hw);
 631
 632                trace_clk_prepare_complete(core);
 633
 634                if (ret) {
 635                        clk_core_unprepare(core->parent);
 636                        return ret;
 637                }
 638        }
 639
 640        core->prepare_count++;
 641
 642        return 0;
 643}
 644
 645/**
 646 * clk_prepare - prepare a clock source
 647 * @clk: the clk being prepared
 648 *
 649 * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
 650 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
  651 * operation may sleep.  One example is a clk which is accessed over I2C.  In
  652 * the complex case a clk ungate operation may require a fast and a slow part.
  653 * It is for this reason that clk_prepare and clk_enable are not mutually
  654 * exclusive.  In fact clk_prepare must be called before clk_enable.
  655 * Returns 0 on success, a negative error code otherwise.
 656 */
 657int clk_prepare(struct clk *clk)
 658{
 659        int ret;
 660
 661        if (!clk)
 662                return 0;
 663
 664        clk_prepare_lock();
 665        ret = clk_core_prepare(clk->core);
 666        clk_prepare_unlock();
 667
 668        return ret;
 669}
 670EXPORT_SYMBOL_GPL(clk_prepare);
 671
 672static void clk_core_disable(struct clk_core *core)
 673{
 674        lockdep_assert_held(&enable_lock);
 675
 676        if (!core)
 677                return;
 678
 679        if (WARN_ON(core->enable_count == 0))
 680                return;
 681
 682        if (--core->enable_count > 0)
 683                return;
 684
 685        trace_clk_disable(core);
 686
 687        if (core->ops->disable)
 688                core->ops->disable(core->hw);
 689
 690        trace_clk_disable_complete(core);
 691
 692        clk_core_disable(core->parent);
 693}
 694
 695/**
 696 * clk_disable - gate a clock
 697 * @clk: the clk being gated
 698 *
 699 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 700 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 701 * clk if the operation is fast and will never sleep.  One example is a
 702 * SoC-internal clk which is controlled via simple register writes.  In the
  703 * complex case a clk gate operation may require a fast and a slow part.  It is
  704 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
  705 * In fact clk_disable must be called before clk_unprepare.
 706 */
 707void clk_disable(struct clk *clk)
 708{
 709        unsigned long flags;
 710
 711        if (IS_ERR_OR_NULL(clk))
 712                return;
 713
 714        flags = clk_enable_lock();
 715        clk_core_disable(clk->core);
 716        clk_enable_unlock(flags);
 717}
 718EXPORT_SYMBOL_GPL(clk_disable);
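
/*
 * Illustrative consumer sketch (hypothetical code, not part of this
 * file): gating mirrors ungating in reverse order, with the atomic
 * clk_disable() first and the sleepable clk_unprepare() second.
 */
static void example_gate(struct clk *clk)
{
        clk_disable(clk);       /* fast part: safe with IRQs disabled */
        clk_unprepare(clk);     /* slow part: may sleep, e.g. I2C access */
}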
 719
 720static int clk_core_enable(struct clk_core *core)
 721{
 722        int ret = 0;
 723
 724        lockdep_assert_held(&enable_lock);
 725
 726        if (!core)
 727                return 0;
 728
 729        if (WARN_ON(core->prepare_count == 0))
 730                return -ESHUTDOWN;
 731
 732        if (core->enable_count == 0) {
 733                ret = clk_core_enable(core->parent);
 734
 735                if (ret)
 736                        return ret;
 737
 738                trace_clk_enable(core);
 739
 740                if (core->ops->enable)
 741                        ret = core->ops->enable(core->hw);
 742
 743                trace_clk_enable_complete(core);
 744
 745                if (ret) {
 746                        clk_core_disable(core->parent);
 747                        return ret;
 748                }
 749        }
 750
 751        core->enable_count++;
 752        return 0;
 753}
 754
 755/**
 756 * clk_enable - ungate a clock
 757 * @clk: the clk being ungated
 758 *
 759 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 760 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 761 * if the operation will never sleep.  One example is a SoC-internal clk which
 762 * is controlled via simple register writes.  In the complex case a clk ungate
  763 * operation may require a fast and a slow part.  It is for this reason that
  764 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
  765 * must be called before clk_enable.  Returns 0 on success, a negative error
  766 * code otherwise.
 767 */
 768int clk_enable(struct clk *clk)
 769{
 770        unsigned long flags;
 771        int ret;
 772
 773        if (!clk)
 774                return 0;
 775
 776        flags = clk_enable_lock();
 777        ret = clk_core_enable(clk->core);
 778        clk_enable_unlock(flags);
 779
 780        return ret;
 781}
 782EXPORT_SYMBOL_GPL(clk_enable);
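
/*
 * Illustrative consumer sketch (hypothetical code, not part of this
 * file): the canonical ungate sequence, unwinding the prepare state if
 * the atomic half fails.
 */
static int example_ungate(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);         /* may sleep; must come first */
        if (ret)
                return ret;

        ret = clk_enable(clk);          /* atomic; IRQ-safe */
        if (ret)
                clk_unprepare(clk);

        return ret;
}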
 783
 784static int clk_core_round_rate_nolock(struct clk_core *core,
 785                                      struct clk_rate_request *req)
 786{
 787        struct clk_core *parent;
 788        long rate;
 789
 790        lockdep_assert_held(&prepare_lock);
 791
 792        if (!core)
 793                return 0;
 794
 795        parent = core->parent;
 796        if (parent) {
 797                req->best_parent_hw = parent->hw;
 798                req->best_parent_rate = parent->rate;
 799        } else {
 800                req->best_parent_hw = NULL;
 801                req->best_parent_rate = 0;
 802        }
 803
 804        if (core->ops->determine_rate) {
 805                return core->ops->determine_rate(core->hw, req);
 806        } else if (core->ops->round_rate) {
 807                rate = core->ops->round_rate(core->hw, req->rate,
 808                                             &req->best_parent_rate);
 809                if (rate < 0)
 810                        return rate;
 811
 812                req->rate = rate;
 813        } else if (core->flags & CLK_SET_RATE_PARENT) {
 814                return clk_core_round_rate_nolock(parent, req);
 815        } else {
 816                req->rate = core->rate;
 817        }
 818
 819        return 0;
 820}
 821
 822/**
 823 * __clk_determine_rate - get the closest rate actually supported by a clock
  824 * @hw: determine the rate of this clock
  825 * @req: target rate request; the rounded rate is returned in @req->rate
  826 * and is bounded by @req->min_rate and @req->max_rate
 828 *
 829 * Useful for clk_ops such as .set_rate and .determine_rate.
 830 */
 831int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
 832{
 833        if (!hw) {
 834                req->rate = 0;
 835                return 0;
 836        }
 837
 838        return clk_core_round_rate_nolock(hw->core, req);
 839}
 840EXPORT_SYMBOL_GPL(__clk_determine_rate);
 841
 842unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
 843{
 844        int ret;
 845        struct clk_rate_request req;
 846
 847        clk_core_get_boundaries(hw->core, &req.min_rate, &req.max_rate);
 848        req.rate = rate;
 849
 850        ret = clk_core_round_rate_nolock(hw->core, &req);
 851        if (ret)
 852                return 0;
 853
 854        return req.rate;
 855}
 856EXPORT_SYMBOL_GPL(clk_hw_round_rate);
 857
 858/**
 859 * clk_round_rate - round the given rate for a clk
 860 * @clk: the clk for which we are rounding a rate
 861 * @rate: the rate which is to be rounded
 862 *
 863 * Takes in a rate as input and rounds it to a rate that the clk can actually
 864 * use which is then returned.  If clk doesn't support round_rate operation
 865 * then the parent rate is returned.
 866 */
 867long clk_round_rate(struct clk *clk, unsigned long rate)
 868{
 869        struct clk_rate_request req;
 870        int ret;
 871
 872        if (!clk)
 873                return 0;
 874
 875        clk_prepare_lock();
 876
 877        clk_core_get_boundaries(clk->core, &req.min_rate, &req.max_rate);
 878        req.rate = rate;
 879
 880        ret = clk_core_round_rate_nolock(clk->core, &req);
 881        clk_prepare_unlock();
 882
 883        if (ret)
 884                return ret;
 885
 886        return req.rate;
 887}
 888EXPORT_SYMBOL_GPL(clk_round_rate);
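
/*
 * Illustrative sketch (hypothetical code): query what the hardware can
 * actually deliver before committing to it with clk_set_rate().
 */
static long example_probe_rate(struct clk *clk, unsigned long target)
{
        long rounded = clk_round_rate(clk, target);

        if (rounded < 0)
                return rounded;         /* propagate the error code */

        pr_debug("requested %lu Hz, achievable %ld Hz\n", target, rounded);
        return rounded;
}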
 889
 890/**
 891 * __clk_notify - call clk notifier chain
 892 * @core: clk that is changing rate
 893 * @msg: clk notifier type (see include/linux/clk.h)
 894 * @old_rate: old clk rate
 895 * @new_rate: new clk rate
 896 *
 897 * Triggers a notifier call chain on the clk rate-change notification
 898 * for 'clk'.  Passes a pointer to the struct clk and the previous
 899 * and current rates to the notifier callback.  Intended to be called by
 900 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 901 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 902 * a driver returns that.
 903 */
 904static int __clk_notify(struct clk_core *core, unsigned long msg,
 905                unsigned long old_rate, unsigned long new_rate)
 906{
 907        struct clk_notifier *cn;
 908        struct clk_notifier_data cnd;
 909        int ret = NOTIFY_DONE;
 910
 911        cnd.old_rate = old_rate;
 912        cnd.new_rate = new_rate;
 913
 914        list_for_each_entry(cn, &clk_notifier_list, node) {
 915                if (cn->clk->core == core) {
 916                        cnd.clk = cn->clk;
 917                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
 918                                        &cnd);
 919                }
 920        }
 921
 922        return ret;
 923}
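
/*
 * Illustrative consumer sketch (hypothetical code): a callback of the
 * kind invoked by __clk_notify() above, registered beforehand with
 * clk_notifier_register().  @msg is PRE_RATE_CHANGE, POST_RATE_CHANGE
 * or ABORT_RATE_CHANGE, and @data points to a struct clk_notifier_data.
 */
static int example_rate_notifier(struct notifier_block *nb,
                                 unsigned long msg, void *data)
{
        struct clk_notifier_data *cnd = data;

        /* veto rate changes above 200 MHz before they are committed */
        if (msg == PRE_RATE_CHANGE && cnd->new_rate > 200000000UL)
                return NOTIFY_BAD;

        return NOTIFY_OK;
}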
 924
 925/**
 926 * __clk_recalc_accuracies
 927 * @core: first clk in the subtree
 928 *
 929 * Walks the subtree of clks starting with clk and recalculates accuracies as
 930 * it goes.  Note that if a clk does not implement the .recalc_accuracy
 931 * callback then it is assumed that the clock will take on the accuracy of its
 932 * parent.
 933 */
 934static void __clk_recalc_accuracies(struct clk_core *core)
 935{
 936        unsigned long parent_accuracy = 0;
 937        struct clk_core *child;
 938
 939        lockdep_assert_held(&prepare_lock);
 940
 941        if (core->parent)
 942                parent_accuracy = core->parent->accuracy;
 943
 944        if (core->ops->recalc_accuracy)
 945                core->accuracy = core->ops->recalc_accuracy(core->hw,
 946                                                          parent_accuracy);
 947        else
 948                core->accuracy = parent_accuracy;
 949
 950        hlist_for_each_entry(child, &core->children, child_node)
 951                __clk_recalc_accuracies(child);
 952}
 953
 954static long clk_core_get_accuracy(struct clk_core *core)
 955{
 956        unsigned long accuracy;
 957
 958        clk_prepare_lock();
 959        if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
 960                __clk_recalc_accuracies(core);
 961
 962        accuracy = __clk_get_accuracy(core);
 963        clk_prepare_unlock();
 964
 965        return accuracy;
 966}
 967
 968/**
 969 * clk_get_accuracy - return the accuracy of clk
 970 * @clk: the clk whose accuracy is being returned
 971 *
 972 * Simply returns the cached accuracy of the clk, unless
  973 * CLK_GET_ACCURACY_NOCACHE flag is set, which means the accuracy will be
  974 * recalculated.
 975 * If clk is NULL then returns 0.
 976 */
 977long clk_get_accuracy(struct clk *clk)
 978{
 979        if (!clk)
 980                return 0;
 981
 982        return clk_core_get_accuracy(clk->core);
 983}
 984EXPORT_SYMBOL_GPL(clk_get_accuracy);
 985
 986static unsigned long clk_recalc(struct clk_core *core,
 987                                unsigned long parent_rate)
 988{
 989        if (core->ops->recalc_rate)
 990                return core->ops->recalc_rate(core->hw, parent_rate);
 991        return parent_rate;
 992}
 993
 994/**
 995 * __clk_recalc_rates
 996 * @core: first clk in the subtree
 997 * @msg: notification type (see include/linux/clk.h)
 998 *
 999 * Walks the subtree of clks starting with clk and recalculates rates as it
1000 * goes.  Note that if a clk does not implement the .recalc_rate callback then
1001 * it is assumed that the clock will take on the rate of its parent.
1002 *
 1003 * __clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 1004 * if necessary.
1005 */
1006static void __clk_recalc_rates(struct clk_core *core, unsigned long msg)
1007{
1008        unsigned long old_rate;
1009        unsigned long parent_rate = 0;
1010        struct clk_core *child;
1011
1012        lockdep_assert_held(&prepare_lock);
1013
1014        old_rate = core->rate;
1015
1016        if (core->parent)
1017                parent_rate = core->parent->rate;
1018
1019        core->rate = clk_recalc(core, parent_rate);
1020
1021        /*
1022         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1023         * & ABORT_RATE_CHANGE notifiers
1024         */
1025        if (core->notifier_count && msg)
1026                __clk_notify(core, msg, old_rate, core->rate);
1027
1028        hlist_for_each_entry(child, &core->children, child_node)
1029                __clk_recalc_rates(child, msg);
1030}
1031
1032static unsigned long clk_core_get_rate(struct clk_core *core)
1033{
1034        unsigned long rate;
1035
1036        clk_prepare_lock();
1037
1038        if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1039                __clk_recalc_rates(core, 0);
1040
1041        rate = clk_core_get_rate_nolock(core);
1042        clk_prepare_unlock();
1043
1044        return rate;
1045}
1046
1047/**
1048 * clk_get_rate - return the rate of clk
1049 * @clk: the clk whose rate is being returned
1050 *
1051 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1052 * is set, which means a recalc_rate will be issued.
1053 * If clk is NULL then returns 0.
1054 */
1055unsigned long clk_get_rate(struct clk *clk)
1056{
1057        if (!clk)
1058                return 0;
1059
1060        return clk_core_get_rate(clk->core);
1061}
1062EXPORT_SYMBOL_GPL(clk_get_rate);
1063
1064static int clk_fetch_parent_index(struct clk_core *core,
1065                                  struct clk_core *parent)
1066{
1067        int i;
1068
1069        if (!parent)
1070                return -EINVAL;
1071
1072        for (i = 0; i < core->num_parents; i++)
1073                if (clk_core_get_parent_by_index(core, i) == parent)
1074                        return i;
1075
1076        return -EINVAL;
1077}
1078
1079/*
1080 * Update the orphan status of @core and all its children.
1081 */
1082static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
1083{
1084        struct clk_core *child;
1085
1086        core->orphan = is_orphan;
1087
1088        hlist_for_each_entry(child, &core->children, child_node)
1089                clk_core_update_orphan_status(child, is_orphan);
1090}
1091
1092static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
1093{
1094        bool was_orphan = core->orphan;
1095
1096        hlist_del(&core->child_node);
1097
1098        if (new_parent) {
1099                bool becomes_orphan = new_parent->orphan;
1100
1101                /* avoid duplicate POST_RATE_CHANGE notifications */
1102                if (new_parent->new_child == core)
1103                        new_parent->new_child = NULL;
1104
1105                hlist_add_head(&core->child_node, &new_parent->children);
1106
1107                if (was_orphan != becomes_orphan)
1108                        clk_core_update_orphan_status(core, becomes_orphan);
1109        } else {
1110                hlist_add_head(&core->child_node, &clk_orphan_list);
1111                if (!was_orphan)
1112                        clk_core_update_orphan_status(core, true);
1113        }
1114
1115        core->parent = new_parent;
1116}
1117
1118static struct clk_core *__clk_set_parent_before(struct clk_core *core,
1119                                           struct clk_core *parent)
1120{
1121        unsigned long flags;
1122        struct clk_core *old_parent = core->parent;
1123
1124        /*
1125         * Migrate prepare state between parents and prevent race with
1126         * clk_enable().
1127         *
1128         * If the clock is not prepared, then a race with
1129         * clk_enable/disable() is impossible since we already have the
1130         * prepare lock (future calls to clk_enable() need to be preceded by
1131         * a clk_prepare()).
1132         *
1133         * If the clock is prepared, migrate the prepared state to the new
1134         * parent and also protect against a race with clk_enable() by
1135         * forcing the clock and the new parent on.  This ensures that all
1136         * future calls to clk_enable() are practically NOPs with respect to
1137         * hardware and software states.
1138         *
1139         * See also: Comment for clk_set_parent() below.
1140         */
1141        if (core->prepare_count) {
1142                clk_core_prepare(parent);
1143                flags = clk_enable_lock();
1144                clk_core_enable(parent);
1145                clk_core_enable(core);
1146                clk_enable_unlock(flags);
1147        }
1148
1149        /* update the clk tree topology */
1150        flags = clk_enable_lock();
1151        clk_reparent(core, parent);
1152        clk_enable_unlock(flags);
1153
1154        return old_parent;
1155}
1156
1157static void __clk_set_parent_after(struct clk_core *core,
1158                                   struct clk_core *parent,
1159                                   struct clk_core *old_parent)
1160{
1161        unsigned long flags;
1162
1163        /*
1164         * Finish the migration of prepare state and undo the changes done
1165         * for preventing a race with clk_enable().
1166         */
1167        if (core->prepare_count) {
1168                flags = clk_enable_lock();
1169                clk_core_disable(core);
1170                clk_core_disable(old_parent);
1171                clk_enable_unlock(flags);
1172                clk_core_unprepare(old_parent);
1173        }
1174}
1175
1176static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
1177                            u8 p_index)
1178{
1179        unsigned long flags;
1180        int ret = 0;
1181        struct clk_core *old_parent;
1182
1183        old_parent = __clk_set_parent_before(core, parent);
1184
1185        trace_clk_set_parent(core, parent);
1186
1187        /* change clock input source */
1188        if (parent && core->ops->set_parent)
1189                ret = core->ops->set_parent(core->hw, p_index);
1190
1191        trace_clk_set_parent_complete(core, parent);
1192
1193        if (ret) {
1194                flags = clk_enable_lock();
1195                clk_reparent(core, old_parent);
1196                clk_enable_unlock(flags);
1197                __clk_set_parent_after(core, old_parent, parent);
1198
1199                return ret;
1200        }
1201
1202        __clk_set_parent_after(core, parent, old_parent);
1203
1204        return 0;
1205}
1206
1207/**
1208 * __clk_speculate_rates
1209 * @core: first clk in the subtree
1210 * @parent_rate: the "future" rate of clk's parent
1211 *
1212 * Walks the subtree of clks starting with clk, speculating rates as it
1213 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
1214 *
1215 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
1216 * pre-rate change notifications and returns early if no clks in the
1217 * subtree have subscribed to the notifications.  Note that if a clk does not
1218 * implement the .recalc_rate callback then it is assumed that the clock will
1219 * take on the rate of its parent.
1220 */
1221static int __clk_speculate_rates(struct clk_core *core,
1222                                 unsigned long parent_rate)
1223{
1224        struct clk_core *child;
1225        unsigned long new_rate;
1226        int ret = NOTIFY_DONE;
1227
1228        lockdep_assert_held(&prepare_lock);
1229
1230        new_rate = clk_recalc(core, parent_rate);
1231
1232        /* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
1233        if (core->notifier_count)
1234                ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
1235
1236        if (ret & NOTIFY_STOP_MASK) {
1237                pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
1238                                __func__, core->name, ret);
1239                goto out;
1240        }
1241
1242        hlist_for_each_entry(child, &core->children, child_node) {
1243                ret = __clk_speculate_rates(child, new_rate);
1244                if (ret & NOTIFY_STOP_MASK)
1245                        break;
1246        }
1247
1248out:
1249        return ret;
1250}
1251
1252static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
1253                             struct clk_core *new_parent, u8 p_index)
1254{
1255        struct clk_core *child;
1256
1257        core->new_rate = new_rate;
1258        core->new_parent = new_parent;
1259        core->new_parent_index = p_index;
1260        /* include clk in new parent's PRE_RATE_CHANGE notifications */
1261        core->new_child = NULL;
1262        if (new_parent && new_parent != core->parent)
1263                new_parent->new_child = core;
1264
1265        hlist_for_each_entry(child, &core->children, child_node) {
1266                child->new_rate = clk_recalc(child, new_rate);
1267                clk_calc_subtree(child, child->new_rate, NULL, 0);
1268        }
1269}
1270
1271/*
1272 * calculate the new rates returning the topmost clock that has to be
1273 * changed.
1274 */
1275static struct clk_core *clk_calc_new_rates(struct clk_core *core,
1276                                           unsigned long rate)
1277{
1278        struct clk_core *top = core;
1279        struct clk_core *old_parent, *parent;
1280        unsigned long best_parent_rate = 0;
1281        unsigned long new_rate;
1282        unsigned long min_rate;
1283        unsigned long max_rate;
1284        int p_index = 0;
1285        long ret;
1286
1287        /* sanity */
1288        if (IS_ERR_OR_NULL(core))
1289                return NULL;
1290
1291        /* save parent rate, if it exists */
1292        parent = old_parent = core->parent;
1293        if (parent)
1294                best_parent_rate = parent->rate;
1295
1296        clk_core_get_boundaries(core, &min_rate, &max_rate);
1297
1298        /* find the closest rate and parent clk/rate */
1299        if (core->ops->determine_rate) {
1300                struct clk_rate_request req;
1301
1302                req.rate = rate;
1303                req.min_rate = min_rate;
1304                req.max_rate = max_rate;
1305                if (parent) {
1306                        req.best_parent_hw = parent->hw;
1307                        req.best_parent_rate = parent->rate;
1308                } else {
1309                        req.best_parent_hw = NULL;
1310                        req.best_parent_rate = 0;
1311                }
1312
1313                ret = core->ops->determine_rate(core->hw, &req);
1314                if (ret < 0)
1315                        return NULL;
1316
1317                best_parent_rate = req.best_parent_rate;
1318                new_rate = req.rate;
1319                parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
1320        } else if (core->ops->round_rate) {
1321                ret = core->ops->round_rate(core->hw, rate,
1322                                            &best_parent_rate);
1323                if (ret < 0)
1324                        return NULL;
1325
1326                new_rate = ret;
1327                if (new_rate < min_rate || new_rate > max_rate)
1328                        return NULL;
1329        } else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
1330                /* pass-through clock without adjustable parent */
1331                core->new_rate = core->rate;
1332                return NULL;
1333        } else {
1334                /* pass-through clock with adjustable parent */
1335                top = clk_calc_new_rates(parent, rate);
1336                new_rate = parent->new_rate;
1337                goto out;
1338        }
1339
1340        /* some clocks must be gated to change parent */
1341        if (parent != old_parent &&
1342            (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1343                pr_debug("%s: %s not gated but wants to reparent\n",
1344                         __func__, core->name);
1345                return NULL;
1346        }
1347
1348        /* try finding the new parent index */
1349        if (parent && core->num_parents > 1) {
1350                p_index = clk_fetch_parent_index(core, parent);
1351                if (p_index < 0) {
1352                        pr_debug("%s: clk %s can not be parent of clk %s\n",
1353                                 __func__, parent->name, core->name);
1354                        return NULL;
1355                }
1356        }
1357
1358        if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
1359            best_parent_rate != parent->rate)
1360                top = clk_calc_new_rates(parent, best_parent_rate);
1361
1362out:
1363        clk_calc_subtree(core, new_rate, parent, p_index);
1364
1365        return top;
1366}
1367
1368/*
1369 * Notify about rate changes in a subtree. Always walk down the whole tree
1370 * so that in case of an error we can walk down the whole tree again and
1371 * abort the change.
1372 */
1373static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
1374                                                  unsigned long event)
1375{
1376        struct clk_core *child, *tmp_clk, *fail_clk = NULL;
1377        int ret = NOTIFY_DONE;
1378
1379        if (core->rate == core->new_rate)
1380                return NULL;
1381
1382        if (core->notifier_count) {
1383                ret = __clk_notify(core, event, core->rate, core->new_rate);
1384                if (ret & NOTIFY_STOP_MASK)
1385                        fail_clk = core;
1386        }
1387
1388        hlist_for_each_entry(child, &core->children, child_node) {
1389                /* Skip children who will be reparented to another clock */
1390                if (child->new_parent && child->new_parent != core)
1391                        continue;
1392                tmp_clk = clk_propagate_rate_change(child, event);
1393                if (tmp_clk)
1394                        fail_clk = tmp_clk;
1395        }
1396
1397        /* handle the new child who might not be in core->children yet */
1398        if (core->new_child) {
1399                tmp_clk = clk_propagate_rate_change(core->new_child, event);
1400                if (tmp_clk)
1401                        fail_clk = tmp_clk;
1402        }
1403
1404        return fail_clk;
1405}
1406
1407/*
1408 * walk down a subtree and set the new rates notifying the rate
1409 * change on the way
1410 */
1411static void clk_change_rate(struct clk_core *core)
1412{
1413        struct clk_core *child;
1414        struct hlist_node *tmp;
1415        unsigned long old_rate;
1416        unsigned long best_parent_rate = 0;
1417        bool skip_set_rate = false;
1418        struct clk_core *old_parent;
1419
1420        old_rate = core->rate;
1421
1422        if (core->new_parent)
1423                best_parent_rate = core->new_parent->rate;
1424        else if (core->parent)
1425                best_parent_rate = core->parent->rate;
1426
1427        if (core->flags & CLK_SET_RATE_UNGATE) {
1428                unsigned long flags;
1429
1430                clk_core_prepare(core);
1431                flags = clk_enable_lock();
1432                clk_core_enable(core);
1433                clk_enable_unlock(flags);
1434        }
1435
1436        if (core->new_parent && core->new_parent != core->parent) {
1437                old_parent = __clk_set_parent_before(core, core->new_parent);
1438                trace_clk_set_parent(core, core->new_parent);
1439
1440                if (core->ops->set_rate_and_parent) {
1441                        skip_set_rate = true;
1442                        core->ops->set_rate_and_parent(core->hw, core->new_rate,
1443                                        best_parent_rate,
1444                                        core->new_parent_index);
1445                } else if (core->ops->set_parent) {
1446                        core->ops->set_parent(core->hw, core->new_parent_index);
1447                }
1448
1449                trace_clk_set_parent_complete(core, core->new_parent);
1450                __clk_set_parent_after(core, core->new_parent, old_parent);
1451        }
1452
1453        trace_clk_set_rate(core, core->new_rate);
1454
1455        if (!skip_set_rate && core->ops->set_rate)
1456                core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
1457
1458        trace_clk_set_rate_complete(core, core->new_rate);
1459
1460        core->rate = clk_recalc(core, best_parent_rate);
1461
1462        if (core->flags & CLK_SET_RATE_UNGATE) {
1463                unsigned long flags;
1464
1465                flags = clk_enable_lock();
1466                clk_core_disable(core);
1467                clk_enable_unlock(flags);
1468                clk_core_unprepare(core);
1469        }
1470
1471        if (core->notifier_count && old_rate != core->rate)
1472                __clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
1473
1474        if (core->flags & CLK_RECALC_NEW_RATES)
1475                (void)clk_calc_new_rates(core, core->new_rate);
1476
1477        /*
1478         * Use safe iteration, as change_rate can actually swap parents
1479         * for certain clock types.
1480         */
1481        hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
1482                /* Skip children who will be reparented to another clock */
1483                if (child->new_parent && child->new_parent != core)
1484                        continue;
1485                clk_change_rate(child);
1486        }
1487
1488        /* handle the new child who might not be in core->children yet */
1489        if (core->new_child)
1490                clk_change_rate(core->new_child);
1491}
1492
1493static int clk_core_set_rate_nolock(struct clk_core *core,
1494                                    unsigned long req_rate)
1495{
1496        struct clk_core *top, *fail_clk;
1497        unsigned long rate = req_rate;
1498        int ret = 0;
1499
1500        if (!core)
1501                return 0;
1502
1503        /* bail early if nothing to do */
1504        if (rate == clk_core_get_rate_nolock(core))
1505                return 0;
1506
1507        if ((core->flags & CLK_SET_RATE_GATE) && core->prepare_count)
1508                return -EBUSY;
1509
1510        /* calculate new rates and get the topmost changed clock */
1511        top = clk_calc_new_rates(core, rate);
1512        if (!top)
1513                return -EINVAL;
1514
1515        /* notify that we are about to change rates */
1516        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1517        if (fail_clk) {
1518                pr_debug("%s: failed to set %s rate\n", __func__,
1519                                fail_clk->name);
1520                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1521                return -EBUSY;
1522        }
1523
1524        /* change the rates */
1525        clk_change_rate(top);
1526
1527        core->req_rate = req_rate;
1528
1529        return ret;
1530}
1531
1532/**
1533 * clk_set_rate - specify a new rate for clk
1534 * @clk: the clk whose rate is being changed
1535 * @rate: the new rate for clk
1536 *
1537 * In the simplest case clk_set_rate will only adjust the rate of clk.
1538 *
1539 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
1540 * propagate up to clk's parent; whether or not this happens depends on the
1541 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
1542 * after calling .round_rate then upstream parent propagation is ignored.  If
1543 * *parent_rate comes back with a new rate for clk's parent then we propagate
1544 * up to clk's parent and set its rate.  Upward propagation will continue
1545 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
1546 * .round_rate stops requesting changes to clk's parent_rate.
1547 *
1548 * Rate changes are accomplished via tree traversal that also recalculates the
1549 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
1550 *
 1551 * Returns 0 on success, a negative error code otherwise.
1552 */
1553int clk_set_rate(struct clk *clk, unsigned long rate)
1554{
1555        int ret;
1556
1557        if (!clk)
1558                return 0;
1559
1560        /* prevent racing with updates to the clock topology */
1561        clk_prepare_lock();
1562
1563        ret = clk_core_set_rate_nolock(clk->core, rate);
1564
1565        clk_prepare_unlock();
1566
1567        return ret;
1568}
1569EXPORT_SYMBOL_GPL(clk_set_rate);
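
/*
 * Illustrative consumer sketch (hypothetical code): set a rate and read
 * back what was actually achieved, since the result may be rounded by
 * the hardware.
 */
static int example_set_rate(struct clk *clk, unsigned long target)
{
        int ret = clk_set_rate(clk, target);

        if (ret)
                return ret;

        pr_debug("asked for %lu Hz, running at %lu Hz\n",
                 target, clk_get_rate(clk));
        return 0;
}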
1570
1571/**
1572 * clk_set_rate_range - set a rate range for a clock source
1573 * @clk: clock source
1574 * @min: desired minimum clock rate in Hz, inclusive
1575 * @max: desired maximum clock rate in Hz, inclusive
1576 *
1577 * Returns success (0) or negative errno.
1578 */
1579int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
1580{
1581        int ret = 0;
1582
1583        if (!clk)
1584                return 0;
1585
1586        if (min > max) {
1587                pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
1588                       __func__, clk->core->name, clk->dev_id, clk->con_id,
1589                       min, max);
1590                return -EINVAL;
1591        }
1592
1593        clk_prepare_lock();
1594
1595        if (min != clk->min_rate || max != clk->max_rate) {
1596                clk->min_rate = min;
1597                clk->max_rate = max;
1598                ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
1599        }
1600
1601        clk_prepare_unlock();
1602
1603        return ret;
1604}
1605EXPORT_SYMBOL_GPL(clk_set_rate_range);
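
/*
 * Illustrative consumer sketch (hypothetical code): constrain a clock
 * to a band rather than one exact rate; the core re-evaluates the rate
 * against the boundaries aggregated over all consumers.
 */
static int example_constrain(struct clk *clk)
{
        /* keep the clock between 100 MHz and 200 MHz, inclusive */
        return clk_set_rate_range(clk, 100000000UL, 200000000UL);
}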
1606
1607/**
1608 * clk_set_min_rate - set a minimum clock rate for a clock source
1609 * @clk: clock source
1610 * @rate: desired minimum clock rate in Hz, inclusive
1611 *
1612 * Returns success (0) or negative errno.
1613 */
1614int clk_set_min_rate(struct clk *clk, unsigned long rate)
1615{
1616        if (!clk)
1617                return 0;
1618
1619        return clk_set_rate_range(clk, rate, clk->max_rate);
1620}
1621EXPORT_SYMBOL_GPL(clk_set_min_rate);
1622
1623/**
1624 * clk_set_max_rate - set a maximum clock rate for a clock source
1625 * @clk: clock source
1626 * @rate: desired maximum clock rate in Hz, inclusive
1627 *
1628 * Returns success (0) or negative errno.
1629 */
1630int clk_set_max_rate(struct clk *clk, unsigned long rate)
1631{
1632        if (!clk)
1633                return 0;
1634
1635        return clk_set_rate_range(clk, clk->min_rate, rate);
1636}
1637EXPORT_SYMBOL_GPL(clk_set_max_rate);
1638
1639/**
1640 * clk_get_parent - return the parent of a clk
1641 * @clk: the clk whose parent gets returned
1642 *
1643 * Simply returns clk->parent.  Returns NULL if clk is NULL.
1644 */
1645struct clk *clk_get_parent(struct clk *clk)
1646{
1647        struct clk *parent;
1648
1649        if (!clk)
1650                return NULL;
1651
1652        clk_prepare_lock();
1653        /* TODO: Create a per-user clk and change callers to call clk_put */
1654        parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
1655        clk_prepare_unlock();
1656
1657        return parent;
1658}
1659EXPORT_SYMBOL_GPL(clk_get_parent);
1660
1661static struct clk_core *__clk_init_parent(struct clk_core *core)
1662{
1663        u8 index = 0;
1664
1665        if (core->num_parents > 1 && core->ops->get_parent)
1666                index = core->ops->get_parent(core->hw);
1667
1668        return clk_core_get_parent_by_index(core, index);
1669}
1670
1671static void clk_core_reparent(struct clk_core *core,
1672                                  struct clk_core *new_parent)
1673{
1674        clk_reparent(core, new_parent);
1675        __clk_recalc_accuracies(core);
1676        __clk_recalc_rates(core, POST_RATE_CHANGE);
1677}
1678
1679void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
1680{
1681        if (!hw)
1682                return;
1683
1684        clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
1685}
1686
1687/**
1688 * clk_has_parent - check if a clock is a possible parent for another
1689 * @clk: clock source
1690 * @parent: parent clock source
1691 *
1692 * This function can be used in drivers that need to check that a clock can be
1693 * the parent of another without actually changing the parent.
1694 *
1695 * Returns true if @parent is a possible parent for @clk, false otherwise.
1696 */
1697bool clk_has_parent(struct clk *clk, struct clk *parent)
1698{
1699        struct clk_core *core, *parent_core;
1700        unsigned int i;
1701
1702        /* NULL clocks should be nops, so return success if either is NULL. */
1703        if (!clk || !parent)
1704                return true;
1705
1706        core = clk->core;
1707        parent_core = parent->core;
1708
1709        /* Optimize for the case where the parent is already the parent. */
1710        if (core->parent == parent_core)
1711                return true;
1712
1713        for (i = 0; i < core->num_parents; i++)
1714                if (strcmp(core->parent_names[i], parent_core->name) == 0)
1715                        return true;
1716
1717        return false;
1718}
1719EXPORT_SYMBOL_GPL(clk_has_parent);
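
/*
 * A minimal sketch of the intended use: validate a candidate source
 * before committing to a switch.  "mux" and "pll_b" are hypothetical
 * per-user clks obtained via clk_get():
 *
 *	if (!clk_has_parent(mux, pll_b))
 *		return -EINVAL;
 */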
1720
1721static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
1722{
1723        int ret = 0;
1724        int p_index = 0;
1725        unsigned long p_rate = 0;
1726
1727        if (!core)
1728                return 0;
1729
1730        /* prevent racing with updates to the clock topology */
1731        clk_prepare_lock();
1732
1733        if (core->parent == parent)
1734                goto out;
1735
1736        /* verify ops for multi-parent clks */
1737        if ((core->num_parents > 1) && (!core->ops->set_parent)) {
1738                ret = -ENOSYS;
1739                goto out;
1740        }
1741
1742        /* check that we are allowed to re-parent if the clock is in use */
1743        if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
1744                ret = -EBUSY;
1745                goto out;
1746        }
1747
1748        /* try finding the new parent index */
1749        if (parent) {
1750                p_index = clk_fetch_parent_index(core, parent);
1751                if (p_index < 0) {
1752                        pr_debug("%s: clk %s cannot be parent of clk %s\n",
1753                                        __func__, parent->name, core->name);
1754                        ret = p_index;
1755                        goto out;
1756                }
1757                p_rate = parent->rate;
1758        }
1759
1760        /* propagate PRE_RATE_CHANGE notifications */
1761        ret = __clk_speculate_rates(core, p_rate);
1762
1763        /* abort if a driver objects */
1764        if (ret & NOTIFY_STOP_MASK)
1765                goto out;
1766
1767        /* do the re-parent */
1768        ret = __clk_set_parent(core, parent, p_index);
1769
1770        /* propagate rate and accuracy recalculation accordingly */
1771        if (ret) {
1772                __clk_recalc_rates(core, ABORT_RATE_CHANGE);
1773        } else {
1774                __clk_recalc_rates(core, POST_RATE_CHANGE);
1775                __clk_recalc_accuracies(core);
1776        }
1777
1778out:
1779        clk_prepare_unlock();
1780
1781        return ret;
1782}
1783
1784/**
1785 * clk_set_parent - switch the parent of a mux clk
1786 * @clk: the mux clk whose input we are switching
1787 * @parent: the new input to clk
1788 *
1789 * Re-parent clk to use parent as its new input source.  If clk is in
1790 * prepared state, the clk will get enabled for the duration of this call. If
1791 * that's not acceptable for a specific clk (e.g. the consumer can't handle
1792 * that, or the reparenting is glitchy in hardware), use the
1793 * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
1794 *
1795 * After successfully changing clk's parent, clk_set_parent will update the
1796 * clk topology, the debugfs topology and propagate rate recalculation via
1797 * __clk_recalc_rates.
1798 *
1799 * Returns 0 on success, -EERROR otherwise.
1800 */
1801int clk_set_parent(struct clk *clk, struct clk *parent)
1802{
1803        if (!clk)
1804                return 0;
1805
1806        return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
1807}
1808EXPORT_SYMBOL_GPL(clk_set_parent);
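
/*
 * An illustrative re-parent from a hypothetical consumer.  If the mux
 * was registered with CLK_SET_PARENT_GATE, it must be unprepared before
 * switching ("mux" and "xtal" are assumptions):
 *
 *	clk_disable_unprepare(mux);
 *	ret = clk_set_parent(mux, xtal);
 *	if (!ret)
 *		ret = clk_prepare_enable(mux);
 */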
1809
1810/**
1811 * clk_set_phase - adjust the phase shift of a clock signal
1812 * @clk: clock signal source
1813 * @degrees: number of degrees the signal is shifted
1814 *
1815 * Shifts the phase of a clock signal by the specified
1816 * degrees. Returns 0 on success, -EERROR otherwise.
1817 *
1818 * This function makes no distinction about the input or reference
1819 * signal that we adjust the clock signal phase against. For example,
1820 * with phase-locked-loop clock signal generators we may shift phase with
1821 * respect to the feedback clock signal input, but in other cases the
1822 * clock phase may be shifted with respect to some other, unspecified
1823 * signal.
1824 *
1825 * Additionally the concept of phase shift does not propagate through
1826 * the clock tree hierarchy, which sets it apart from clock rates and
1827 * clock accuracy. A parent clock phase attribute does not have an
1828 * impact on the phase attribute of a child clock.
1829 */
1830int clk_set_phase(struct clk *clk, int degrees)
1831{
1832        int ret = -EINVAL;
1833
1834        if (!clk)
1835                return 0;
1836
1837        /* sanity check degrees */
1838        degrees %= 360;
1839        if (degrees < 0)
1840                degrees += 360;
1841
1842        clk_prepare_lock();
1843
1844        /* bail early if nothing to do */
1845        if (degrees == clk->core->phase)
1846                goto out;
1847
1848        trace_clk_set_phase(clk->core, degrees);
1849
1850        if (clk->core->ops->set_phase)
1851                ret = clk->core->ops->set_phase(clk->core->hw, degrees);
1852
1853        trace_clk_set_phase_complete(clk->core, degrees);
1854
1855        if (!ret)
1856                clk->core->phase = degrees;
1857
1858out:
1859        clk_prepare_unlock();
1860
1861        return ret;
1862}
1863EXPORT_SYMBOL_GPL(clk_set_phase);
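
/*
 * Example: shift a hypothetical sampling clock a quarter period against
 * its reference.  Because the degrees argument is normalized into
 * [0, 359] above, passing -90 is equivalent to passing 270:
 *
 *	ret = clk_set_phase(sample_clk, 270);
 */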
1864
1865static int clk_core_get_phase(struct clk_core *core)
1866{
1867        int ret;
1868
1869        clk_prepare_lock();
1870        ret = core->phase;
1871        clk_prepare_unlock();
1872
1873        return ret;
1874}
1875
1876/**
1877 * clk_get_phase - return the phase shift of a clock signal
1878 * @clk: clock signal source
1879 *
1880 * Returns the phase shift of a clock node in degrees, otherwise returns
1881 * -EERROR.
1882 */
1883int clk_get_phase(struct clk *clk)
1884{
1885        if (!clk)
1886                return 0;
1887
1888        return clk_core_get_phase(clk->core);
1889}
1890EXPORT_SYMBOL_GPL(clk_get_phase);
1891
1892/**
1893 * clk_is_match - check if two clk's point to the same hardware clock
1894 * @p: clk compared against q
1895 * @q: clk compared against p
1896 *
1897 * Returns true if the two struct clk pointers both point to the same hardware
1898 * clock node. Put differently, returns true if struct clk *p and struct clk *q
1899 * share the same struct clk_core object.
1900 *
1901 * Returns false otherwise. Note that two NULL clks are treated as matching.
1902 */
1903bool clk_is_match(const struct clk *p, const struct clk *q)
1904{
1905        /* trivial case: identical struct clk's or both NULL */
1906        if (p == q)
1907                return true;
1908
1909        /* true if clk->core pointers match. Avoid dereferencing garbage */
1910        if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
1911                if (p->core == q->core)
1912                        return true;
1913
1914        return false;
1915}
1916EXPORT_SYMBOL_GPL(clk_is_match);
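
/*
 * Example: clk_get() may hand out distinct per-user struct clk cookies
 * for the same hardware clock, so comparing the two pointers directly
 * can give a false negative.  "a" and "b" are hypothetical per-user
 * clks:
 *
 *	if (clk_is_match(a, b))
 *		pr_info("a and b wrap the same clk_core\n");
 */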
1917
1918/***        debugfs support        ***/
1919
1920#ifdef CONFIG_DEBUG_FS
1921#include <linux/debugfs.h>
1922
1923static struct dentry *rootdir;
1924static int inited = 0;
1925static DEFINE_MUTEX(clk_debug_lock);
1926static HLIST_HEAD(clk_debug_list);
1927
1928static struct hlist_head *all_lists[] = {
1929        &clk_root_list,
1930        &clk_orphan_list,
1931        NULL,
1932};
1933
1934static struct hlist_head *orphan_list[] = {
1935        &clk_orphan_list,
1936        NULL,
1937};
1938
1939static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
1940                                 int level)
1941{
1942        if (!c)
1943                return;
1944
1945        seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
1946                   level * 3 + 1, "",
1947                   30 - level * 3, c->name,
1948                   c->enable_count, c->prepare_count, clk_core_get_rate(c),
1949                   clk_core_get_accuracy(c), clk_core_get_phase(c));
1950}
1951
1952static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
1953                                     int level)
1954{
1955        struct clk_core *child;
1956
1957        if (!c)
1958                return;
1959
1960        clk_summary_show_one(s, c, level);
1961
1962        hlist_for_each_entry(child, &c->children, child_node)
1963                clk_summary_show_subtree(s, child, level + 1);
1964}
1965
1966static int clk_summary_show(struct seq_file *s, void *data)
1967{
1968        struct clk_core *c;
1969        struct hlist_head **lists = (struct hlist_head **)s->private;
1970
1971        seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
1972        seq_puts(s, "----------------------------------------------------------------------------------------\n");
1973
1974        clk_prepare_lock();
1975
1976        for (; *lists; lists++)
1977                hlist_for_each_entry(c, *lists, child_node)
1978                        clk_summary_show_subtree(s, c, 0);
1979
1980        clk_prepare_unlock();
1981
1982        return 0;
1983}
1984
1986static int clk_summary_open(struct inode *inode, struct file *file)
1987{
1988        return single_open(file, clk_summary_show, inode->i_private);
1989}
1990
1991static const struct file_operations clk_summary_fops = {
1992        .open           = clk_summary_open,
1993        .read           = seq_read,
1994        .llseek         = seq_lseek,
1995        .release        = single_release,
1996};
1997
1998static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
1999{
2000        if (!c)
2001                return;
2002
2003        /* This should be JSON format, i.e. elements separated with a comma */
2004        seq_printf(s, "\"%s\": { ", c->name);
2005        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
2006        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
2007        seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
2008        seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
2009        seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
2010}
2011
2012static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
2013{
2014        struct clk_core *child;
2015
2016        if (!c)
2017                return;
2018
2019        clk_dump_one(s, c, level);
2020
2021        hlist_for_each_entry(child, &c->children, child_node) {
2022                seq_puts(s, ",");
2023                clk_dump_subtree(s, child, level + 1);
2024        }
2025
2026        seq_puts(s, "}");
2027}
2028
2029static int clk_dump(struct seq_file *s, void *data)
2030{
2031        struct clk_core *c;
2032        bool first_node = true;
2033        struct hlist_head **lists = (struct hlist_head **)s->private;
2034
2035        seq_puts(s, "{");
2036
2037        clk_prepare_lock();
2038
2039        for (; *lists; lists++) {
2040                hlist_for_each_entry(c, *lists, child_node) {
2041                        if (!first_node)
2042                                seq_puts(s, ",");
2043                        first_node = false;
2044                        clk_dump_subtree(s, c, 0);
2045                }
2046        }
2047
2048        clk_prepare_unlock();
2049
2050        seq_puts(s, "}\n");
2051        return 0;
2052}
2053
2055static int clk_dump_open(struct inode *inode, struct file *file)
2056{
2057        return single_open(file, clk_dump, inode->i_private);
2058}
2059
2060static const struct file_operations clk_dump_fops = {
2061        .open           = clk_dump_open,
2062        .read           = seq_read,
2063        .llseek         = seq_lseek,
2064        .release        = single_release,
2065};
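
/*
 * For reference, the dump for a hypothetical tree with a root "osc"
 * feeding a single child "pll" would look like the following (all
 * values made up):
 *
 *	{"osc": { "enable_count": 1,"prepare_count": 1,"rate": 24000000,
 *	"accuracy": 0,"phase": 0,"pll": { "enable_count": 0,
 *	"prepare_count": 0,"rate": 600000000,"accuracy": 0,"phase": 0}}}
 */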
2066
2067static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
2068{
2069        struct dentry *d;
2070        int ret = -ENOMEM;
2071
2072        if (!core || !pdentry) {
2073                ret = -EINVAL;
2074                goto out;
2075        }
2076
2077        d = debugfs_create_dir(core->name, pdentry);
2078        if (!d)
2079                goto out;
2080
2081        core->dentry = d;
2082
2083        d = debugfs_create_u32("clk_rate", S_IRUGO, core->dentry,
2084                        (u32 *)&core->rate);
2085        if (!d)
2086                goto err_out;
2087
2088        d = debugfs_create_u32("clk_accuracy", S_IRUGO, core->dentry,
2089                        (u32 *)&core->accuracy);
2090        if (!d)
2091                goto err_out;
2092
2093        d = debugfs_create_u32("clk_phase", S_IRUGO, core->dentry,
2094                        (u32 *)&core->phase);
2095        if (!d)
2096                goto err_out;
2097
2098        d = debugfs_create_x32("clk_flags", S_IRUGO, core->dentry,
2099                        (u32 *)&core->flags);
2100        if (!d)
2101                goto err_out;
2102
2103        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, core->dentry,
2104                        (u32 *)&core->prepare_count);
2105        if (!d)
2106                goto err_out;
2107
2108        d = debugfs_create_u32("clk_enable_count", S_IRUGO, core->dentry,
2109                        (u32 *)&core->enable_count);
2110        if (!d)
2111                goto err_out;
2112
2113        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
2114                        (u32 *)&core->notifier_count);
2115        if (!d)
2116                goto err_out;
2117
2118        if (core->ops->debug_init) {
2119                ret = core->ops->debug_init(core->hw, core->dentry);
2120                if (ret)
2121                        goto err_out;
2122        }
2123
2124        ret = 0;
2125        goto out;
2126
2127err_out:
2128        debugfs_remove_recursive(core->dentry);
2129        core->dentry = NULL;
2130out:
2131        return ret;
2132}
2133
2134/**
2135 * clk_debug_register - add a clk node to the debugfs clk directory
2136 * @core: the clk being added to the debugfs clk directory
2137 *
2138 * Dynamically adds a clk to the debugfs clk directory if debugfs has been
2139 * initialized.  Otherwise it bails out early since the debugfs clk directory
2140 * will be created lazily by clk_debug_init as part of a late_initcall.
2141 */
2142static int clk_debug_register(struct clk_core *core)
2143{
2144        int ret = 0;
2145
2146        mutex_lock(&clk_debug_lock);
2147        hlist_add_head(&core->debug_node, &clk_debug_list);
2148
2149        if (!inited)
2150                goto unlock;
2151
2152        ret = clk_debug_create_one(core, rootdir);
2153unlock:
2154        mutex_unlock(&clk_debug_lock);
2155
2156        return ret;
2157}
2158
2159/**
2160 * clk_debug_unregister - remove a clk node from the debugfs clk directory
2161 * @core: the clk being removed from the debugfs clk directory
2162 *
2163 * Dynamically removes a clk and all its child nodes from the
2164 * debugfs clk directory if clk->dentry points to debugfs created by
2165 * clk_debug_register in __clk_core_init.
2166 */
2167static void clk_debug_unregister(struct clk_core *core)
2168{
2169        mutex_lock(&clk_debug_lock);
2170        hlist_del_init(&core->debug_node);
2171        debugfs_remove_recursive(core->dentry);
2172        core->dentry = NULL;
2173        mutex_unlock(&clk_debug_lock);
2174}
2175
2176struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
2177                                void *data, const struct file_operations *fops)
2178{
2179        struct dentry *d = NULL;
2180
2181        if (hw->core->dentry)
2182                d = debugfs_create_file(name, mode, hw->core->dentry, data,
2183                                        fops);
2184
2185        return d;
2186}
2187EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
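
/*
 * A sketch of a provider exposing an extra debugfs file after its clk
 * has been registered.  "my_regs_fops" is an assumed file_operations
 * the provider defines elsewhere; the call simply returns NULL until
 * the clk's debugfs directory exists:
 *
 *	clk_debugfs_add_file(hw, "my_regs", S_IRUGO, hw, &my_regs_fops);
 */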
2188
2189/**
2190 * clk_debug_init - lazily populate the debugfs clk directory
2191 *
2192 * clks are often initialized very early during boot before memory can be
2193 * dynamically allocated and well before debugfs is set up. This function
2194 * populates the debugfs clk directory once at boot-time when we know that
2195 * debugfs is set up. It should only be called once at boot-time; all other
2196 * clks added dynamically will be registered with clk_debug_register.
2197 */
2198static int __init clk_debug_init(void)
2199{
2200        struct clk_core *core;
2201        struct dentry *d;
2202
2203        rootdir = debugfs_create_dir("clk", NULL);
2204
2205        if (!rootdir)
2206                return -ENOMEM;
2207
2208        d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
2209                                &clk_summary_fops);
2210        if (!d)
2211                return -ENOMEM;
2212
2213        d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
2214                                &clk_dump_fops);
2215        if (!d)
2216                return -ENOMEM;
2217
2218        d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
2219                                &orphan_list, &clk_summary_fops);
2220        if (!d)
2221                return -ENOMEM;
2222
2223        d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
2224                                &orphan_list, &clk_dump_fops);
2225        if (!d)
2226                return -ENOMEM;
2227
2228        mutex_lock(&clk_debug_lock);
2229        hlist_for_each_entry(core, &clk_debug_list, debug_node)
2230                clk_debug_create_one(core, rootdir);
2231
2232        inited = 1;
2233        mutex_unlock(&clk_debug_lock);
2234
2235        return 0;
2236}
2237late_initcall(clk_debug_init);
2238#else
2239static inline int clk_debug_register(struct clk_core *core) { return 0; }
2240static inline void clk_debug_reparent(struct clk_core *core,
2241                                      struct clk_core *new_parent)
2242{
2243}
2244static inline void clk_debug_unregister(struct clk_core *core)
2245{
2246}
2247#endif
2248
2249/**
2250 * __clk_core_init - initialize the data structures in a struct clk_core
2251 * @core:       clk_core being initialized
2252 *
2253 * Initializes the lists in struct clk_core, queries the hardware for the
2254 * parent and rate and sets them both.
2255 */
2256static int __clk_core_init(struct clk_core *core)
2257{
2258        int i, ret = 0;
2259        struct clk_core *orphan;
2260        struct hlist_node *tmp2;
2261        unsigned long rate;
2262
2263        if (!core)
2264                return -EINVAL;
2265
2266        clk_prepare_lock();
2267
2268        /* check to see if a clock with this name is already registered */
2269        if (clk_core_lookup(core->name)) {
2270                pr_debug("%s: clk %s already initialized\n",
2271                                __func__, core->name);
2272                ret = -EEXIST;
2273                goto out;
2274        }
2275
2276        /* check that clk_ops are sane.  See Documentation/clk.txt */
2277        if (core->ops->set_rate &&
2278            !((core->ops->round_rate || core->ops->determine_rate) &&
2279              core->ops->recalc_rate)) {
2280                pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
2281                       __func__, core->name);
2282                ret = -EINVAL;
2283                goto out;
2284        }
2285
2286        if (core->ops->set_parent && !core->ops->get_parent) {
2287                pr_err("%s: %s must implement .get_parent & .set_parent\n",
2288                       __func__, core->name);
2289                ret = -EINVAL;
2290                goto out;
2291        }
2292
2293        if (core->num_parents > 1 && !core->ops->get_parent) {
2294                pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
2295                       __func__, core->name);
2296                ret = -EINVAL;
2297                goto out;
2298        }
2299
2300        if (core->ops->set_rate_and_parent &&
2301                        !(core->ops->set_parent && core->ops->set_rate)) {
2302                pr_err("%s: %s must implement .set_parent & .set_rate\n",
2303                                __func__, core->name);
2304                ret = -EINVAL;
2305                goto out;
2306        }
2307
2308        /* throw a WARN if any entries in parent_names are NULL */
2309        for (i = 0; i < core->num_parents; i++)
2310                WARN(!core->parent_names[i],
2311                                "%s: invalid NULL in %s's .parent_names\n",
2312                                __func__, core->name);
2313
2314        core->parent = __clk_init_parent(core);
2315
2316        /*
2317         * Populate core->parent if parent has already been clk_core_init'd. If
2318         * parent has not yet been clk_core_init'd then place clk in the orphan
2319         * list.  If clk doesn't have any parents then place it in the root
2320         * clk list.
2321         *
2322         * Every time a new clk is clk_init'd, we walk the list of orphan
2323         * clocks and re-parent any that are children of the clock currently
2324         * being clk_init'd.
2325         */
2326        if (core->parent) {
2327                hlist_add_head(&core->child_node,
2328                                &core->parent->children);
2329                core->orphan = core->parent->orphan;
2330        } else if (!core->num_parents) {
2331                hlist_add_head(&core->child_node, &clk_root_list);
2332                core->orphan = false;
2333        } else {
2334                hlist_add_head(&core->child_node, &clk_orphan_list);
2335                core->orphan = true;
2336        }
2337
2338        /*
2339         * Set clk's accuracy.  The preferred method is to use
2340         * .recalc_accuracy. For simple clocks and lazy developers the default
2341         * fallback is to use the parent's accuracy.  If a clock doesn't have a
2342         * parent (or is orphaned) then accuracy is set to zero (perfect
2343         * clock).
2344         */
2345        if (core->ops->recalc_accuracy)
2346                core->accuracy = core->ops->recalc_accuracy(core->hw,
2347                                        __clk_get_accuracy(core->parent));
2348        else if (core->parent)
2349                core->accuracy = core->parent->accuracy;
2350        else
2351                core->accuracy = 0;
2352
2353        /*
2354         * Set clk's phase.
2355         * Since a phase is by definition relative to its parent, query
2356         * the hardware for the current phase, or else assume it is in phase.
2357         */
2358        if (core->ops->get_phase)
2359                core->phase = core->ops->get_phase(core->hw);
2360        else
2361                core->phase = 0;
2362
2363        /*
2364         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
2365         * simple clocks and lazy developers the default fallback is to use the
2366         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
2367         * then rate is set to zero.
2368         */
2369        if (core->ops->recalc_rate)
2370                rate = core->ops->recalc_rate(core->hw,
2371                                clk_core_get_rate_nolock(core->parent));
2372        else if (core->parent)
2373                rate = core->parent->rate;
2374        else
2375                rate = 0;
2376        core->rate = core->req_rate = rate;
2377
2378        /*
2379         * walk the list of orphan clocks and reparent any that have newly
2380         * found a parent.
2381         */
2382        hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
2383                struct clk_core *parent = __clk_init_parent(orphan);
2384
2385                if (parent)
2386                        clk_core_reparent(orphan, parent);
2387        }
2388
2389        /*
2390         * optional platform-specific magic
2391         *
2392         * The .init callback is not used by any of the basic clock types, but
2393         * exists for weird hardware that must perform initialization magic.
2394         * Please consider other ways of solving initialization problems before
2395         * using this callback, as its use is discouraged.
2396         */
2397        if (core->ops->init)
2398                core->ops->init(core->hw);
2399
2400        kref_init(&core->ref);
2401out:
2402        clk_prepare_unlock();
2403
2404        if (!ret)
2405                clk_debug_register(core);
2406
2407        return ret;
2408}
2409
2410struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
2411                             const char *con_id)
2412{
2413        struct clk *clk;
2414
2415        /* This is to allow this function to be chained to others */
2416        if (IS_ERR_OR_NULL(hw))
2417                return (struct clk *) hw;
2418
2419        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
2420        if (!clk)
2421                return ERR_PTR(-ENOMEM);
2422
2423        clk->core = hw->core;
2424        clk->dev_id = dev_id;
2425        clk->con_id = con_id;
2426        clk->max_rate = ULONG_MAX;
2427
2428        clk_prepare_lock();
2429        hlist_add_head(&clk->clks_node, &hw->core->clks);
2430        clk_prepare_unlock();
2431
2432        return clk;
2433}
2434
2435void __clk_free_clk(struct clk *clk)
2436{
2437        clk_prepare_lock();
2438        hlist_del(&clk->clks_node);
2439        clk_prepare_unlock();
2440
2441        kfree(clk);
2442}
2443
2444/**
2445 * clk_register - allocate a new clock, register it and return an opaque cookie
2446 * @dev: device that is registering this clock
2447 * @hw: link to hardware-specific clock data
2448 *
2449 * clk_register is the primary interface for populating the clock tree with new
2450 * clock nodes.  It returns a pointer to the newly allocated struct clk which
2451 * cannot be dereferenced by driver code but may be used in conjunction with the
2452 * rest of the clock API.  In the event of an error clk_register will return an
2453 * error code; drivers must test for an error code after calling clk_register.
2454 */
2455struct clk *clk_register(struct device *dev, struct clk_hw *hw)
2456{
2457        int i, ret;
2458        struct clk_core *core;
2459
2460        core = kzalloc(sizeof(*core), GFP_KERNEL);
2461        if (!core) {
2462                ret = -ENOMEM;
2463                goto fail_out;
2464        }
2465
2466        core->name = kstrdup_const(hw->init->name, GFP_KERNEL);
2467        if (!core->name) {
2468                ret = -ENOMEM;
2469                goto fail_name;
2470        }
2471        core->ops = hw->init->ops;
2472        if (dev && dev->driver)
2473                core->owner = dev->driver->owner;
2474        core->hw = hw;
2475        core->flags = hw->init->flags;
2476        core->num_parents = hw->init->num_parents;
2477        core->min_rate = 0;
2478        core->max_rate = ULONG_MAX;
2479        hw->core = core;
2480
2481        /* allocate local copy in case parent_names is __initdata */
2482        core->parent_names = kcalloc(core->num_parents, sizeof(char *),
2483                                        GFP_KERNEL);
2484
2485        if (!core->parent_names) {
2486                ret = -ENOMEM;
2487                goto fail_parent_names;
2488        }
2489
2491        /* copy each string name in case parent_names is __initdata */
2492        for (i = 0; i < core->num_parents; i++) {
2493                core->parent_names[i] = kstrdup_const(hw->init->parent_names[i],
2494                                                GFP_KERNEL);
2495                if (!core->parent_names[i]) {
2496                        ret = -ENOMEM;
2497                        goto fail_parent_names_copy;
2498                }
2499        }
2500
2501        /* avoid unnecessary string look-ups of clk_core's possible parents. */
2502        core->parents = kcalloc(core->num_parents, sizeof(*core->parents),
2503                                GFP_KERNEL);
2504        if (!core->parents) {
2505                ret = -ENOMEM;
2506                goto fail_parents;
2507        }
2508
2509        INIT_HLIST_HEAD(&core->clks);
2510
2511        hw->clk = __clk_create_clk(hw, NULL, NULL);
2512        if (IS_ERR(hw->clk)) {
2513                ret = PTR_ERR(hw->clk);
2514                goto fail_parents;
2515        }
2516
2517        ret = __clk_core_init(core);
2518        if (!ret)
2519                return hw->clk;
2520
2521        __clk_free_clk(hw->clk);
2522        hw->clk = NULL;
2523
2524fail_parents:
2525        kfree(core->parents);
2526fail_parent_names_copy:
2527        while (--i >= 0)
2528                kfree_const(core->parent_names[i]);
2529        kfree(core->parent_names);
2530fail_parent_names:
2531        kfree_const(core->name);
2532fail_name:
2533        kfree(core);
2534fail_out:
2535        return ERR_PTR(ret);
2536}
2537EXPORT_SYMBOL_GPL(clk_register);
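
/*
 * A minimal registration sketch for a hypothetical provider;
 * "my_clk_ops" and the "osc" parent name are assumptions.  struct
 * clk_init_data and struct clk_hw come from linux/clk-provider.h:
 *
 *	static struct clk_hw my_hw;
 *
 *	static const struct clk_init_data init = {
 *		.name = "my_clk",
 *		.ops = &my_clk_ops,
 *		.parent_names = (const char *[]){ "osc" },
 *		.num_parents = 1,
 *	};
 *
 *	my_hw.init = &init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */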
2538
2539/* Free memory allocated for a clock. */
2540static void __clk_release(struct kref *ref)
2541{
2542        struct clk_core *core = container_of(ref, struct clk_core, ref);
2543        int i = core->num_parents;
2544
2545        lockdep_assert_held(&prepare_lock);
2546
2547        kfree(core->parents);
2548        while (--i >= 0)
2549                kfree_const(core->parent_names[i]);
2550
2551        kfree(core->parent_names);
2552        kfree_const(core->name);
2553        kfree(core);
2554}
2555
2556/*
2557 * Empty clk_ops for unregistered clocks. These are used temporarily
2558 * after clk_unregister() was called on a clock and until the last clock
2559 * consumer calls clk_put() and the struct clk object is freed.
2560 */
2561static int clk_nodrv_prepare_enable(struct clk_hw *hw)
2562{
2563        return -ENXIO;
2564}
2565
2566static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
2567{
2568        WARN_ON_ONCE(1);
2569}
2570
2571static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
2572                                        unsigned long parent_rate)
2573{
2574        return -ENXIO;
2575}
2576
2577static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
2578{
2579        return -ENXIO;
2580}
2581
2582static const struct clk_ops clk_nodrv_ops = {
2583        .enable         = clk_nodrv_prepare_enable,
2584        .disable        = clk_nodrv_disable_unprepare,
2585        .prepare        = clk_nodrv_prepare_enable,
2586        .unprepare      = clk_nodrv_disable_unprepare,
2587        .set_rate       = clk_nodrv_set_rate,
2588        .set_parent     = clk_nodrv_set_parent,
2589};
2590
2591/**
2592 * clk_unregister - unregister a currently registered clock
2593 * @clk: clock to unregister
2594 */
2595void clk_unregister(struct clk *clk)
2596{
2597        unsigned long flags;
2598
2599        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2600                return;
2601
2602        clk_debug_unregister(clk->core);
2603
2604        clk_prepare_lock();
2605
2606        if (clk->core->ops == &clk_nodrv_ops) {
2607                pr_err("%s: unregistered clock: %s\n", __func__,
2608                       clk->core->name);
2609                goto unlock;
2610        }
2611        /*
2612         * Assign empty clock ops for consumers that might still hold
2613         * a reference to this clock.
2614         */
2615        flags = clk_enable_lock();
2616        clk->core->ops = &clk_nodrv_ops;
2617        clk_enable_unlock(flags);
2618
2619        if (!hlist_empty(&clk->core->children)) {
2620                struct clk_core *child;
2621                struct hlist_node *t;
2622
2623                /* Reparent all children to the orphan list. */
2624                hlist_for_each_entry_safe(child, t, &clk->core->children,
2625                                          child_node)
2626                        clk_core_set_parent(child, NULL);
2627        }
2628
2629        hlist_del_init(&clk->core->child_node);
2630
2631        if (clk->core->prepare_count)
2632                pr_warn("%s: unregistering prepared clock: %s\n",
2633                                        __func__, clk->core->name);
2634        kref_put(&clk->core->ref, __clk_release);
2635unlock:
2636        clk_prepare_unlock();
2637}
2638EXPORT_SYMBOL_GPL(clk_unregister);
2639
2640static void devm_clk_release(struct device *dev, void *res)
2641{
2642        clk_unregister(*(struct clk **)res);
2643}
2644
2645/**
2646 * devm_clk_register - resource managed clk_register()
2647 * @dev: device that is registering this clock
2648 * @hw: link to hardware-specific clock data
2649 *
2650 * Managed clk_register(). Clocks returned from this function are
2651 * automatically clk_unregister()ed on driver detach. See clk_register() for
2652 * more information.
2653 */
2654struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
2655{
2656        struct clk *clk;
2657        struct clk **clkp;
2658
2659        clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
2660        if (!clkp)
2661                return ERR_PTR(-ENOMEM);
2662
2663        clk = clk_register(dev, hw);
2664        if (!IS_ERR(clk)) {
2665                *clkp = clk;
2666                devres_add(dev, clkp);
2667        } else {
2668                devres_free(clkp);
2669        }
2670
2671        return clk;
2672}
2673EXPORT_SYMBOL_GPL(devm_clk_register);
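
/*
 * The devres variant in a hypothetical probe path, reusing the assumed
 * "my_hw" from the sketch above; the clock is unregistered
 * automatically when the driver detaches:
 *
 *	clk = devm_clk_register(&pdev->dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */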
2674
2675static int devm_clk_match(struct device *dev, void *res, void *data)
2676{
2677        struct clk *c = res;
2678        if (WARN_ON(!c))
2679                return 0;
2680        return c == data;
2681}
2682
2683/**
2684 * devm_clk_unregister - resource managed clk_unregister()
2685 * @dev: device that is unregistering this clock
 * @clk: clock to unregister
2686 *
2687 * Deallocate a clock allocated with devm_clk_register(). Normally
2688 * this function will not need to be called and the resource management
2689 * code will ensure that the resource is freed.
2690 */
2691void devm_clk_unregister(struct device *dev, struct clk *clk)
2692{
2693        WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
2694}
2695EXPORT_SYMBOL_GPL(devm_clk_unregister);
2696
2697/*
2698 * clkdev helpers
2699 */
2700int __clk_get(struct clk *clk)
2701{
2702        struct clk_core *core = !clk ? NULL : clk->core;
2703
2704        if (core) {
2705                if (!try_module_get(core->owner))
2706                        return 0;
2707
2708                kref_get(&core->ref);
2709        }
2710        return 1;
2711}
2712
2713void __clk_put(struct clk *clk)
2714{
2715        struct module *owner;
2716
2717        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
2718                return;
2719
2720        clk_prepare_lock();
2721
2722        hlist_del(&clk->clks_node);
2723        if (clk->min_rate > clk->core->req_rate ||
2724            clk->max_rate < clk->core->req_rate)
2725                clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
2726
2727        owner = clk->core->owner;
2728        kref_put(&clk->core->ref, __clk_release);
2729
2730        clk_prepare_unlock();
2731
2732        module_put(owner);
2733
2734        kfree(clk);
2735}
2736
2737/***        clk rate change notifiers        ***/
2738
2739/**
2740 * clk_notifier_register - add a clk rate change notifier
2741 * @clk: struct clk * to watch
2742 * @nb: struct notifier_block * with callback info
2743 *
2744 * Request notification when clk's rate changes.  This uses an SRCU
2745 * notifier because we want it to block and notifier unregistrations are
2746 * uncommon.  The callbacks associated with the notifier must not
2747 * re-enter into the clk framework by calling any top-level clk APIs;
2748 * this would recursively take the prepare_lock mutex.
2749 *
2750 * In all notification cases (pre, post and abort rate change) the original
2751 * clock rate is passed to the callback via struct clk_notifier_data.old_rate
2752 * and the new frequency is passed via struct clk_notifier_data.new_rate.
2753 *
2754 * clk_notifier_register() must be called from non-atomic context.
2755 * Returns -EINVAL if called with null arguments, -ENOMEM upon
2756 * allocation failure; otherwise, passes along the return value of
2757 * srcu_notifier_chain_register().
2758 */
2759int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
2760{
2761        struct clk_notifier *cn;
2762        int ret = -ENOMEM;
2763
2764        if (!clk || !nb)
2765                return -EINVAL;
2766
2767        clk_prepare_lock();
2768
2769        /* search the list of notifiers for this clk */
2770        list_for_each_entry(cn, &clk_notifier_list, node)
2771                if (cn->clk == clk)
2772                        break;
2773
2774        /* if clk wasn't in the notifier list, allocate new clk_notifier */
2775        if (cn->clk != clk) {
2776                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
2777                if (!cn)
2778                        goto out;
2779
2780                cn->clk = clk;
2781                srcu_init_notifier_head(&cn->notifier_head);
2782
2783                list_add(&cn->node, &clk_notifier_list);
2784        }
2785
2786        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
2787
2788        clk->core->notifier_count++;
2789
2790out:
2791        clk_prepare_unlock();
2792
2793        return ret;
2794}
2795EXPORT_SYMBOL_GPL(clk_notifier_register);
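
/*
 * A sketch of a hypothetical consumer vetoing rate increases beyond a
 * limit; MY_MAX_RATE is an assumed constant.  Returning
 * notifier_from_errno() from a PRE_RATE_CHANGE callback aborts the
 * change:
 *
 *	static int my_rate_cb(struct notifier_block *nb,
 *			      unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (event == PRE_RATE_CHANGE && cnd->new_rate > MY_MAX_RATE)
 *			return notifier_from_errno(-EINVAL);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_rate_cb };
 *
 *	ret = clk_notifier_register(clk, &my_nb);
 */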
2796
2797/**
2798 * clk_notifier_unregister - remove a clk rate change notifier
2799 * @clk: struct clk *
2800 * @nb: struct notifier_block * with callback info
2801 *
2802 * Requests no further notification for changes to 'clk' and frees memory
2803 * allocated in clk_notifier_register.
2804 *
2805 * Returns -EINVAL if called with null arguments; otherwise, passes
2806 * along the return value of srcu_notifier_chain_unregister().
2807 */
2808int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
2809{
2810        struct clk_notifier *cn = NULL;
2811        int ret = -EINVAL;
2812
2813        if (!clk || !nb)
2814                return -EINVAL;
2815
2816        clk_prepare_lock();
2817
2818        list_for_each_entry(cn, &clk_notifier_list, node)
2819                if (cn->clk == clk)
2820                        break;
2821
2822        if (cn->clk == clk) {
2823                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
2824
2825                clk->core->notifier_count--;
2826
2827                /* XXX the notifier code should handle this better */
2828                if (!cn->notifier_head.head) {
2829                        srcu_cleanup_notifier_head(&cn->notifier_head);
2830                        list_del(&cn->node);
2831                        kfree(cn);
2832                }
2833
2834        } else {
2835                ret = -ENOENT;
2836        }
2837
2838        clk_prepare_unlock();
2839
2840        return ret;
2841}
2842EXPORT_SYMBOL_GPL(clk_notifier_unregister);
2843
2844#ifdef CONFIG_OF
2845/**
2846 * struct of_clk_provider - Clock provider registration structure
2847 * @link: Entry in global list of clock providers
2848 * @node: Pointer to device tree node of clock provider
2849 * @get: Get clock callback.  Returns NULL or a struct clk for the
2850 *       given clock specifier
2851 * @data: context pointer to be passed into @get callback
2852 */
2853struct of_clk_provider {
2854        struct list_head link;
2855
2856        struct device_node *node;
2857        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
2858        void *data;
2859};
2860
2861static const struct of_device_id __clk_of_table_sentinel
2862        __used __section(__clk_of_table_end);
2863
2864static LIST_HEAD(of_clk_providers);
2865static DEFINE_MUTEX(of_clk_mutex);
2866
2867struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
2868                                     void *data)
2869{
2870        return data;
2871}
2872EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
2873
2874struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
2875{
2876        struct clk_onecell_data *clk_data = data;
2877        unsigned int idx = clkspec->args[0];
2878
2879        if (idx >= clk_data->clk_num) {
2880                pr_err("%s: invalid clock index %u\n", __func__, idx);
2881                return ERR_PTR(-EINVAL);
2882        }
2883
2884        return clk_data->clks[idx];
2885}
2886EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
2887
2888/**
2889 * of_clk_add_provider() - Register a clock provider for a node
2890 * @np: Device node pointer associated with clock provider
2891 * @clk_src_get: callback for decoding clock
2892 * @data: context pointer for @clk_src_get callback.
2893 */
2894int of_clk_add_provider(struct device_node *np,
2895                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
2896                                                   void *data),
2897                        void *data)
2898{
2899        struct of_clk_provider *cp;
2900        int ret;
2901
2902        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
2903        if (!cp)
2904                return -ENOMEM;
2905
2906        cp->node = of_node_get(np);
2907        cp->data = data;
2908        cp->get = clk_src_get;
2909
2910        mutex_lock(&of_clk_mutex);
2911        list_add(&cp->link, &of_clk_providers);
2912        mutex_unlock(&of_clk_mutex);
2913        pr_debug("Added clock from %s\n", np->full_name);
2914
2915        ret = of_clk_set_defaults(np, true);
2916        if (ret < 0)
2917                of_clk_del_provider(np);
2918
2919        return ret;
2920}
2921EXPORT_SYMBOL_GPL(of_clk_add_provider);
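
/*
 * A sketch of a provider handing out an array of clks through the
 * onecell helper above; "clks" and NR_CLKS are assumptions:
 *
 *	static struct clk *clks[NR_CLKS];
 *	static struct clk_onecell_data clk_data = {
 *		.clks = clks,
 *		.clk_num = NR_CLKS,
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
 */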
2922
2923/**
2924 * of_clk_del_provider() - Remove a previously registered clock provider
2925 * @np: Device node pointer associated with clock provider
2926 */
2927void of_clk_del_provider(struct device_node *np)
2928{
2929        struct of_clk_provider *cp;
2930
2931        mutex_lock(&of_clk_mutex);
2932        list_for_each_entry(cp, &of_clk_providers, link) {
2933                if (cp->node == np) {
2934                        list_del(&cp->link);
2935                        of_node_put(cp->node);
2936                        kfree(cp);
2937                        break;
2938                }
2939        }
2940        mutex_unlock(&of_clk_mutex);
2941}
2942EXPORT_SYMBOL_GPL(of_clk_del_provider);
2943
2944struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
2945                                       const char *dev_id, const char *con_id)
2946{
2947        struct of_clk_provider *provider;
2948        struct clk *clk = ERR_PTR(-EPROBE_DEFER);
2949
2950        if (!clkspec)
2951                return ERR_PTR(-EINVAL);
2952
2953        /* Check if we have such a provider in our array */
2954        mutex_lock(&of_clk_mutex);
2955        list_for_each_entry(provider, &of_clk_providers, link) {
2956                if (provider->node == clkspec->np)
2957                        clk = provider->get(clkspec, provider->data);
2958                if (!IS_ERR(clk)) {
2959                        clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
2960                                               con_id);
2961
2962                        if (!IS_ERR(clk) && !__clk_get(clk)) {
2963                                __clk_free_clk(clk);
2964                                clk = ERR_PTR(-ENOENT);
2965                        }
2966
2967                        break;
2968                }
2969        }
2970        mutex_unlock(&of_clk_mutex);
2971
2972        return clk;
2973}
2974
2975/**
2976 * of_clk_get_from_provider() - Lookup a clock from a clock provider
2977 * @clkspec: pointer to a clock specifier data structure
2978 *
2979 * This function looks up a struct clk from the registered list of clock
2980 * providers; the input is a clock specifier data structure as returned
2981 * by the of_parse_phandle_with_args() function call.
2982 */
2983struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
2984{
2985        return __of_clk_get_from_provider(clkspec, NULL, __func__);
2986}
2987EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
2988
2989/**
2990 * of_clk_get_parent_count() - Count the number of clocks a device node has
2991 * @np: device node to count
2992 *
2993 * Returns: The number of clocks that are possible parents of this node
2994 */
2995unsigned int of_clk_get_parent_count(struct device_node *np)
2996{
2997        int count;
2998
2999        count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
3000        if (count < 0)
3001                return 0;
3002
3003        return count;
3004}
3005EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
3006
3007const char *of_clk_get_parent_name(struct device_node *np, int index)
3008{
3009        struct of_phandle_args clkspec;
3010        struct property *prop;
3011        const char *clk_name;
3012        const __be32 *vp;
3013        u32 pv;
3014        int rc;
3015        int count;
3016        struct clk *clk;
3017
3018        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
3019                                        &clkspec);
3020        if (rc)
3021                return NULL;
3022
3023        index = clkspec.args_count ? clkspec.args[0] : 0;
3024        count = 0;
3025
3026        /* if there is a clock-indices property, use it to transfer the index
3027         * specified into an array offset for the clock-output-names property.
3028         */
3029        of_property_for_each_u32(clkspec.np, "clock-indices", prop, vp, pv) {
3030                if (index == pv) {
3031                        index = count;
3032                        break;
3033                }
3034                count++;
3035        }
3036        /* We went off the end of 'clock-indices' without finding it */
3037        if (prop && !vp)
3038                return NULL;
3039
3040        if (of_property_read_string_index(clkspec.np, "clock-output-names",
3041                                          index,
3042                                          &clk_name) < 0) {
3043                /*
3044                 * Best effort to get the name if the clock has been
3045                 * registered with the framework. If the clock isn't
3046                 * registered, we return the node name as the name of
3047                 * the clock as long as #clock-cells = 0.
3048                 */
3049                clk = of_clk_get_from_provider(&clkspec);
3050                if (IS_ERR(clk)) {
3051                        if (clkspec.args_count == 0)
3052                                clk_name = clkspec.np->name;
3053                        else
3054                                clk_name = NULL;
3055                } else {
3056                        clk_name = __clk_get_name(clk);
3057                        clk_put(clk);
3058                }
3059        }
3060
3062        of_node_put(clkspec.np);
3063        return clk_name;
3064}
3065EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
3066
3067/**
3068 * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
3069 * number of parents
3070 * @np: Device node pointer associated with clock provider
3071 * @parents: pointer to a char array that holds the parents' names
3072 * @size: size of the @parents array
3073 *
3074 * Return: number of parents for the clock node.
3075 */
3076int of_clk_parent_fill(struct device_node *np, const char **parents,
3077                       unsigned int size)
3078{
3079        unsigned int i = 0;
3080
3081        while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
3082                i++;
3083
3084        return i;
3085}
3086EXPORT_SYMBOL_GPL(of_clk_parent_fill);
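
/*
 * A typical use in a hypothetical provider: collect the parent names
 * once and feed the count into clk_init_data.num_parents:
 *
 *	const char *parents[4];
 *	int num_parents = of_clk_parent_fill(np, parents, 4);
 */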
3087
3088struct clock_provider {
3089        of_clk_init_cb_t clk_init_cb;
3090        struct device_node *np;
3091        struct list_head node;
3092};
3093
3094/*
3095 * This function looks for a parent clock. If there is one, then it
3096 * checks that the provider for this parent clock was initialized, in
3097 * which case the parent clock will be ready.
3098 */
3099static int parent_ready(struct device_node *np)
3100{
3101        int i = 0;
3102
3103        while (true) {
3104                struct clk *clk = of_clk_get(np, i);
3105
3106                /* this parent is ready, we can check the next one */
3107                if (!IS_ERR(clk)) {
3108                        clk_put(clk);
3109                        i++;
3110                        continue;
3111                }
3112
3113                /* at least one parent is not ready, we exit now */
3114                if (PTR_ERR(clk) == -EPROBE_DEFER)
3115                        return 0;
3116
3117                /*
3118                 * Here we assume that the device tree is written
3119                 * correctly, so an error means that there are no more
3120                 * parents. As we didn't exit yet, the previous parents
3121                 * are ready. If there are no clock parents at all, there
3122                 * is no need to wait for them, so we can consider their
3123                 * absence as being ready.
3124                 */
3125                return 1;
3126        }
3127}
3128
3129/**
3130 * of_clk_init() - Scan and init clock providers from the DT
3131 * @matches: array of compatible values and init functions for providers.
3132 *
3133 * This function scans the device tree for matching clock providers
3134 * and calls their initialization functions. It does so while trying
3135 * to follow provider dependencies.
3136 */
3137void __init of_clk_init(const struct of_device_id *matches)
3138{
3139        const struct of_device_id *match;
3140        struct device_node *np;
3141        struct clock_provider *clk_provider, *next;
3142        bool is_init_done;
3143        bool force = false;
3144        LIST_HEAD(clk_provider_list);
3145
3146        if (!matches)
3147                matches = &__clk_of_table;
3148
3149        /* First prepare the list of the clock providers */
3150        for_each_matching_node_and_match(np, matches, &match) {
3151                struct clock_provider *parent;
3152
3153                if (!of_device_is_available(np))
3154                        continue;
3155
3156                parent = kzalloc(sizeof(*parent), GFP_KERNEL);
3157                if (!parent) {
3158                        list_for_each_entry_safe(clk_provider, next,
3159                                                 &clk_provider_list, node) {
3160                                list_del(&clk_provider->node);
3161                                of_node_put(clk_provider->np);
3162                                kfree(clk_provider);
3163                        }
3164                        of_node_put(np);
3165                        return;
3166                }
3167
3168                parent->clk_init_cb = match->data;
3169                parent->np = of_node_get(np);
3170                list_add_tail(&parent->node, &clk_provider_list);
3171        }
3172
3173        while (!list_empty(&clk_provider_list)) {
3174                is_init_done = false;
3175                list_for_each_entry_safe(clk_provider, next,
3176                                        &clk_provider_list, node) {
3177                        if (force || parent_ready(clk_provider->np)) {
3178
3179                                clk_provider->clk_init_cb(clk_provider->np);
3180                                of_clk_set_defaults(clk_provider->np, true);
3181
3182                                list_del(&clk_provider->node);
3183                                of_node_put(clk_provider->np);
3184                                kfree(clk_provider);
3185                                is_init_done = true;
3186                        }
3187                }
3188
3189                /*
3190                 * We didn't manage to initialize any of the
3191                 * remaining providers during the last iteration, so now
3192                 * we initialize all the remaining ones unconditionally
3193                 * in case a clock parent was not mandatory.
3194                 */
3195                if (!is_init_done)
3196                        force = true;
3197        }
3198}
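
/*
 * Providers typically hook into of_clk_init() with CLK_OF_DECLARE()
 * from linux/clk-provider.h, which places an of_device_id into
 * __clk_of_table.  A hypothetical example ("my_clk_setup" and the
 * compatible string are assumptions):
 *
 *	static void __init my_clk_setup(struct device_node *np)
 *	{
 *		...
 *	}
 *	CLK_OF_DECLARE(my_clk, "vendor,my-clock", my_clk_setup);
 */
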
3199#endif
3200