linux/drivers/sh/clk/core.c
/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *      Copyright (C) 2004 - 2008 Nokia Corporation
 *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/*
 * Clock disable operations are not passed on to the hardware during
 * boot; they only take effect once clk_late_init() has set
 * allow_disable.
 */
static int allow_disable;

void clk_rate_table_build(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          int nr_freqs,
                          struct clk_div_mult_table *src_table,
                          unsigned long *bitmap)
{
        unsigned long mult, div;
        unsigned long freq;
        int i;

        clk->nr_freqs = nr_freqs;

        for (i = 0; i < nr_freqs; i++) {
                div = 1;
                mult = 1;

                if (src_table->divisors && i < src_table->nr_divisors)
                        div = src_table->divisors[i];

                if (src_table->multipliers && i < src_table->nr_multipliers)
                        mult = src_table->multipliers[i];

                if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
                        freq = CPUFREQ_ENTRY_INVALID;
                else
                        freq = clk->parent->rate * mult / div;

                freq_table[i].driver_data = i;
                freq_table[i].frequency = freq;
        }

        /* Termination entry */
        freq_table[i].driver_data = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;
}

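/*
 * Illustrative sketch (not part of this file): an SoC clock driver with
 * a table of candidate divisors might build its cpufreq frequency table
 * as below. The "divisors" array and "div4_clk" name are hypothetical;
 * note the table needs one extra slot for the CPUFREQ_TABLE_END
 * terminator written by clk_rate_table_build().
 *
 *	static unsigned int divisors[] = { 1, 2, 3, 4, 6, 8, 12, 16 };
 *
 *	static struct clk_div_mult_table div4_div_mult_table = {
 *		.divisors	= divisors,
 *		.nr_divisors	= ARRAY_SIZE(divisors),
 *	};
 *
 *	static struct cpufreq_frequency_table freqs[ARRAY_SIZE(divisors) + 1];
 *
 *	clk_rate_table_build(div4_clk, freqs, ARRAY_SIZE(divisors),
 *			     &div4_div_mult_table, NULL);
 */
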
struct clk_rate_round_data;

struct clk_rate_round_data {
        unsigned long rate;
        unsigned int min, max;
        long (*func)(unsigned int, struct clk_rate_round_data *);
        void *arg;
};

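/*
 * Iterate over [r->min, r->max], fetching each candidate frequency via
 * r->func(). Candidates that come back as 0 (i.e. invalid table
 * entries) are filtered out by the trailing if/else, so the loop body
 * only ever sees usable frequencies.
 */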
#define for_each_frequency(pos, r, freq)                        \
        for (pos = r->min, freq = r->func(pos, r);              \
             pos <= r->max; pos++, freq = r->func(pos, r))      \
                if (unlikely(freq == 0))                        \
                        ;                                       \
                else
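
/*
 * Scan all candidate frequencies and return the one with the smallest
 * error relative to rounder->rate, clamping to the lowest/highest
 * candidate when the requested rate falls outside the attainable range.
 */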
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
        unsigned long rate_error, rate_error_prev = ~0UL;
        unsigned long highest, lowest, freq;
        long rate_best_fit = -ENOENT;
        int i;

        highest = 0;
        lowest = ~0UL;

        for_each_frequency(i, rounder, freq) {
                if (freq > highest)
                        highest = freq;
                if (freq < lowest)
                        lowest = freq;

                rate_error = abs(freq - rounder->rate);
                if (rate_error < rate_error_prev) {
                        rate_best_fit = freq;
                        rate_error_prev = rate_error;
                }

                if (rate_error == 0)
                        break;
        }

        if (rounder->rate >= highest)
                rate_best_fit = highest;
        if (rounder->rate <= lowest)
                rate_best_fit = lowest;

        return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
                                struct clk_rate_round_data *rounder)
{
        struct cpufreq_frequency_table *freq_table = rounder->arg;
        unsigned long freq = freq_table[pos].frequency;

        if (freq == CPUFREQ_ENTRY_INVALID)
                freq = 0;

        return freq;
}

long clk_rate_table_round(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          unsigned long rate)
{
        struct clk_rate_round_data table_round = {
                .min    = 0,
                .max    = clk->nr_freqs - 1,
                .func   = clk_rate_table_iter,
                .arg    = freq_table,
                .rate   = rate,
        };

        if (clk->nr_freqs < 1)
                return -ENOSYS;

        return clk_rate_round_helper(&table_round);
}

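/*
 * Illustrative sketch (not part of this file): a .round_rate op for a
 * table-driven clock typically just defers to clk_rate_table_round(),
 * assuming "freqs" is the table built earlier with
 * clk_rate_table_build():
 *
 *	static long div4_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_table_round(clk, freqs, rate);
 *	}
 */
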
static long clk_rate_div_range_iter(unsigned int pos,
                                    struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
                              unsigned int div_max, unsigned long rate)
{
        struct clk_rate_round_data div_range_round = {
                .min    = div_min,
                .max    = div_max,
                .func   = clk_rate_div_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&div_range_round);
}

static long clk_rate_mult_range_iter(unsigned int pos,
                                     struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
                               unsigned int mult_max, unsigned long rate)
{
        struct clk_rate_round_data mult_range_round = {
                .min    = mult_min,
                .max    = mult_max,
                .func   = clk_rate_mult_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&mult_range_round);
}

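/*
 * Illustrative sketch (not part of this file): a programmable divider
 * supporting divisors 1..64 can round against its parent rate without a
 * frequency table at all:
 *
 *	static long div_clk_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 *
 * clk_rate_mult_range_round() is the analogue for multiplier-based
 * (PLL-style) clocks that scale the parent rate up.
 */
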
int clk_rate_table_find(struct clk *clk,
                        struct cpufreq_frequency_table *freq_table,
                        unsigned long rate)
{
        struct cpufreq_frequency_table *pos;

        cpufreq_for_each_valid_entry(pos, freq_table)
                if (pos->frequency == rate)
                        return pos - freq_table;

        return -ENOENT;
}

/* Used for clocks that always run at the same rate as their parent */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);

                propagate_rate(clkp);
        }
}

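/*
 * Illustrative sketch (not part of this file): a .recalc op derives a
 * clock's rate from its parent, so a fixed divide-by-2 clock that takes
 * part in the propagation above could be as simple as:
 *
 *	static unsigned long div2_recalc(struct clk *clk)
 *	{
 *		return clk->parent->rate / 2;
 *	}
 *
 * hooked up through the clock's ops so that propagate_rate() and
 * recalculate_root_clocks() can refresh it whenever a parent changes.
 */
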
static void __clk_disable(struct clk *clk)
{
        if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
                 clk))
                return;

        if (!(--clk->usecount)) {
                if (likely(allow_disable && clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

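/*
 * Illustrative consumer-side usage (sketch; the "sci_fck" clock name is
 * just an example): enables nest per clock, so every clk_enable() must
 * be balanced by a matching clk_disable():
 *
 *	struct clk *clk = clk_get(dev, "sci_fck");
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */
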
static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided
 * each clock's .recalc op is set correctly, propagates the new rates
 * down to their children. Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
        while (clk->parent)
                clk = clk->parent;

        return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /*
         * Propagate mappings.
         */
        if (!mapping) {
                struct clk *clkp;

                /*
                 * dummy mapping for root clocks with no specified ranges
                 */
                if (!clk->parent) {
                        clk->mapping = &dummy_mapping;
                        goto out;
                }

                /*
                 * If we're on a child clock and it provides no mapping of its
                 * own, inherit the mapping from its root clock.
                 */
                clkp = lookup_root_clock(clk);
                mapping = clkp->mapping;
                BUG_ON(!mapping);
        }

        /*
         * Establish initial mapping.
         */
        if (!mapping->base && mapping->phys) {
                kref_init(&mapping->ref);

                mapping->base = ioremap_nocache(mapping->phys, mapping->len);
                if (unlikely(!mapping->base))
                        return -ENXIO;
        } else if (mapping->base) {
                /*
                 * Bump the refcount for an existing mapping
                 */
                kref_get(&mapping->ref);
        }

        clk->mapping = mapping;
out:
        clk->mapped_reg = clk->mapping->base;
        clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
        return 0;
}

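/*
 * Illustrative sketch (not part of this file; the register base and
 * length are hypothetical): a root clock can describe its register
 * window with a clk_mapping, and child clocks that provide no mapping
 * of their own then inherit it, sharing a single ioremap() of the
 * window:
 *
 *	static struct clk_mapping cpg_mapping = {
 *		.phys	= 0xffc80000,
 *		.len	= 0x80,
 *	};
 *
 *	static struct clk extal_clk = {
 *		.rate		= 33333333,
 *		.mapping	= &cpg_mapping,
 *	};
 */
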
static void clk_destroy_mapping(struct kref *kref)
{
        struct clk_mapping *mapping;

        mapping = container_of(kref, struct clk_mapping, ref);

        iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /* Nothing to do */
        if (mapping == &dummy_mapping)
                goto out;

        kref_put(&mapping->ref, clk_destroy_mapping);
        clk->mapping = NULL;
out:
        clk->mapped_reg = NULL;
}

int clk_register(struct clk *clk)
{
        int ret;

        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        /*
         * Bail out early if the clock is already registered.
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        ret = clk_establish_mapping(clk);
        if (unlikely(ret))
                goto out_unlock;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
#endif

out_unlock:
        mutex_unlock(&clock_list_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_register);

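/*
 * Illustrative SoC-side registration (sketch; the clock array is
 * hypothetical): clocks are typically defined statically and registered
 * in a loop during platform setup:
 *
 *	static struct clk *main_clks[] = {
 *		&extal_clk,
 *		&div2_clk,
 *	};
 *
 *	for (i = 0; i < ARRAY_SIZE(main_clks); i++)
 *		ret |= clk_register(main_clks[i]);
 *
 * Registration links the clock into its parent's children list (or the
 * root list), establishes the register mapping, and zeroes the
 * usecount.
 */
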
void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        clk_teardown_mapping(clk);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        if (!clk)
                return 0;

        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        if (!clk)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        pr_debug("set parent of %p to %p (new rate %ld)\n",
                                 clk, clk->parent, clk->rate);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        if (!clk)
                return NULL;

        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (!clk)
                return 0;

        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

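/*
 * Illustrative consumer-side usage (sketch): round first, then set, so
 * that the rate actually programmed is one the clock can attain:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		clk_set_rate(clk, rounded);
 *
 * clk_set_rate() recalculates the clock's own rate afterwards and
 * propagates the change down to all of its children.
 */
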
#ifdef CONFIG_PM
static void clks_core_resume(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node) {
                if (likely(clkp->usecount && clkp->ops)) {
                        unsigned long rate = clkp->rate;

                        if (likely(clkp->ops->set_parent))
                                clkp->ops->set_parent(clkp,
                                        clkp->parent);
                        if (likely(clkp->ops->set_rate))
                                clkp->ops->set_rate(clkp, rate);
                        else if (likely(clkp->ops->recalc))
                                clkp->rate = clkp->ops->recalc(clkp);
                }
        }
}

static struct syscore_ops clks_syscore_ops = {
        .resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
        register_syscore_ops(&clks_syscore_ops);

        return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
        unsigned long flags;
        struct clk *clk;

        /* disable all clocks with zero use count */
        mutex_lock(&clock_list_sem);
        spin_lock_irqsave(&clock_lock, flags);

        list_for_each_entry(clk, &clock_list, node)
                if (!clk->usecount && clk->ops && clk->ops->disable)
                        clk->ops->disable(clk);

        /* from now on allow clock disable operations */
        allow_disable = 1;

        spin_unlock_irqrestore(&clock_lock, flags);
        mutex_unlock(&clock_list_sem);
        return 0;
}
late_initcall(clk_late_init);