linux/drivers/sh/clk/core.c
/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *      Copyright (C) 2004 - 2008 Nokia Corporation
 *      Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;
void clk_rate_table_build(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          int nr_freqs,
                          struct clk_div_mult_table *src_table,
                          unsigned long *bitmap)
{
        unsigned long mult, div;
        unsigned long freq;
        int i;

        clk->nr_freqs = nr_freqs;

        for (i = 0; i < nr_freqs; i++) {
                div = 1;
                mult = 1;

                if (src_table->divisors && i < src_table->nr_divisors)
                        div = src_table->divisors[i];

                if (src_table->multipliers && i < src_table->nr_multipliers)
                        mult = src_table->multipliers[i];

                if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
                        freq = CPUFREQ_ENTRY_INVALID;
                else
                        freq = clk->parent->rate * mult / div;

                freq_table[i].driver_data = i;
                freq_table[i].frequency = freq;
        }

        /* Termination entry */
        freq_table[i].driver_data = i;
        freq_table[i].frequency = CPUFREQ_TABLE_END;
}
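
/*
 * Usage sketch (illustrative only; the table and clock names here are
 * hypothetical, real definitions live in the SoC-specific clock code
 * under arch/sh and drivers/sh):
 *
 *	static unsigned int my_divisors[] = { 1, 2, 3, 4 };
 *	static struct clk_div_mult_table my_div_mult = {
 *		.divisors	= my_divisors,
 *		.nr_divisors	= ARRAY_SIZE(my_divisors),
 *	};
 *	static struct cpufreq_frequency_table my_freq_table[5];
 *
 *	clk_rate_table_build(&my_clk, my_freq_table,
 *			     ARRAY_SIZE(my_divisors), &my_div_mult, NULL);
 *
 * Entry i becomes parent->rate * mult[i] / div[i] (mult defaults to 1);
 * entries with a zero divisor/multiplier, or cleared in the optional
 * bitmap, are marked CPUFREQ_ENTRY_INVALID. The table needs room for
 * nr_freqs + 1 entries to hold the CPUFREQ_TABLE_END terminator.
 */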

struct clk_rate_round_data;

struct clk_rate_round_data {
        unsigned long rate;
        unsigned int min, max;
        long (*func)(unsigned int, struct clk_rate_round_data *);
        void *arg;
};

#define for_each_frequency(pos, r, freq)                        \
        for (pos = r->min, freq = r->func(pos, r);              \
             pos <= r->max; pos++, freq = r->func(pos, r))      \
                if (unlikely(freq == 0))                        \
                        ;                                       \
                else

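/*
 * Note on for_each_frequency(): the iterator callback reports 0 for slots
 * that carry no usable frequency (clk_rate_table_iter() below maps
 * CPUFREQ_ENTRY_INVALID to 0), and the dangling if/else above swallows
 * those entries while still advancing pos.
 */
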
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
        unsigned long rate_error, rate_error_prev = ~0UL;
        unsigned long highest, lowest, freq;
        long rate_best_fit = -ENOENT;
        int i;

        highest = 0;
        lowest = ~0UL;

        for_each_frequency(i, rounder, freq) {
                if (freq > highest)
                        highest = freq;
                if (freq < lowest)
                        lowest = freq;

                rate_error = abs(freq - rounder->rate);
                if (rate_error < rate_error_prev) {
                        rate_best_fit = freq;
                        rate_error_prev = rate_error;
                }

                if (rate_error == 0)
                        break;
        }

        if (rounder->rate >= highest)
                rate_best_fit = highest;
        if (rounder->rate <= lowest)
                rate_best_fit = lowest;

        return rate_best_fit;
}
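
/*
 * The helper above keeps the candidate with the smallest absolute error,
 * and the two final comparisons clamp the result to the highest or lowest
 * candidate seen when the requested rate falls outside the scanned range.
 */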

static long clk_rate_table_iter(unsigned int pos,
                                struct clk_rate_round_data *rounder)
{
        struct cpufreq_frequency_table *freq_table = rounder->arg;
        unsigned long freq = freq_table[pos].frequency;

        if (freq == CPUFREQ_ENTRY_INVALID)
                freq = 0;

        return freq;
}

long clk_rate_table_round(struct clk *clk,
                          struct cpufreq_frequency_table *freq_table,
                          unsigned long rate)
{
        struct clk_rate_round_data table_round = {
                .min    = 0,
                .max    = clk->nr_freqs - 1,
                .func   = clk_rate_table_iter,
                .arg    = freq_table,
                .rate   = rate,
        };

        if (clk->nr_freqs < 1)
                return -ENOSYS;

        return clk_rate_round_helper(&table_round);
}
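
/*
 * Typical lookup against a table built by clk_rate_table_build(); the
 * table name is hypothetical:
 *
 *	long rounded = clk_rate_table_round(clk, my_freq_table, 48000000);
 *
 * This returns the table frequency closest to 48 MHz, or -ENOSYS when the
 * clock has no frequencies registered at all.
 */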

static long clk_rate_div_range_iter(unsigned int pos,
                                    struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) / pos;
}

long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
                              unsigned int div_max, unsigned long rate)
{
        struct clk_rate_round_data div_range_round = {
                .min    = div_min,
                .max    = div_max,
                .func   = clk_rate_div_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&div_range_round);
}

static long clk_rate_mult_range_iter(unsigned int pos,
                                      struct clk_rate_round_data *rounder)
{
        return clk_get_rate(rounder->arg) * pos;
}

long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
                               unsigned int mult_max, unsigned long rate)
{
        struct clk_rate_round_data mult_range_round = {
                .min    = mult_min,
                .max    = mult_max,
                .func   = clk_rate_mult_range_iter,
                .arg    = clk_get_parent(clk),
                .rate   = rate,
        };

        return clk_rate_round_helper(&mult_range_round);
}
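
/*
 * Worked example with illustrative numbers: given a 100000000 Hz parent,
 * clk_rate_div_range_round(clk, 1, 4, 30000000) scans 100 MHz / {1..4}
 * and returns 33333333, the candidate closest to the 30 MHz target.
 * clk_rate_mult_range_round() is the multiplicative mirror image,
 * scanning parent_rate * mult over [mult_min, mult_max].
 */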

int clk_rate_table_find(struct clk *clk,
                        struct cpufreq_frequency_table *freq_table,
                        unsigned long rate)
{
        struct cpufreq_frequency_table *pos;

        cpufreq_for_each_valid_entry(pos, freq_table)
                if (pos->frequency == rate)
                        return pos - freq_table;

        return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
        return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
        list_del_init(&child->sibling);
        if (parent)
                list_add(&child->sibling, &parent->children);
        child->parent = parent;

        return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &tclk->children, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);

                propagate_rate(clkp);
        }
}
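
/*
 * propagate_rate() walks the tree depth-first: each child with a .recalc
 * op rederives its rate from its (already updated) parent before its own
 * subtree is visited, so a rate change at any node reaches every
 * descendant.
 */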

static void __clk_disable(struct clk *clk)
{
        if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
                 clk))
                return;

        if (!(--clk->usecount)) {
                if (likely(allow_disable && clk->ops && clk->ops->disable))
                        clk->ops->disable(clk);
                if (likely(clk->parent))
                        __clk_disable(clk->parent);
        }
}

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return;

        spin_lock_irqsave(&clock_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (clk->usecount++ == 0) {
                if (clk->parent) {
                        ret = __clk_enable(clk->parent);
                        if (unlikely(ret))
                                goto err;
                }

                if (clk->ops && clk->ops->enable) {
                        ret = clk->ops->enable(clk);
                        if (ret) {
                                if (clk->parent)
                                        __clk_disable(clk->parent);
                                goto err;
                        }
                }
        }

        return ret;
err:
        clk->usecount--;
        return ret;
}

int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        if (!clk)
                return -EINVAL;

        spin_lock_irqsave(&clock_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
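
/*
 * Enable/disable are refcounted and recurse up the parent chain: the first
 * clk_enable() of a clock enables its ancestors first, and the final
 * clk_disable() powers them down again. A minimal sketch:
 *
 *	ret = clk_enable(clk);		(usecount 0 -> 1, parents first)
 *	if (ret)
 *		return ret;
 *	...
 *	clk_disable(clk);		(usecount 1 -> 0, then parents)
 *
 * Note that the hardware ->disable() callback is suppressed until
 * clk_late_init() sets allow_disable at late_initcall time.
 */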

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent) and, provided each
 * clock's .recalc op is set correctly, propagates the new rates down to
 * their children. Called at init.
 */
void recalculate_root_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &root_clks, sibling) {
                if (clkp->ops && clkp->ops->recalc)
                        clkp->rate = clkp->ops->recalc(clkp);
                propagate_rate(clkp);
        }
}

static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
        while (clk->parent)
                clk = clk->parent;

        return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /*
         * Propagate mappings.
         */
        if (!mapping) {
                struct clk *clkp;

                /*
                 * dummy mapping for root clocks with no specified ranges
                 */
                if (!clk->parent) {
                        clk->mapping = &dummy_mapping;
                        goto out;
                }

                /*
                 * If we're on a child clock and it provides no mapping of its
                 * own, inherit the mapping from its root clock.
                 */
                clkp = lookup_root_clock(clk);
                mapping = clkp->mapping;
                BUG_ON(!mapping);
        }

        /*
         * Establish initial mapping.
         */
        if (!mapping->base && mapping->phys) {
                kref_init(&mapping->ref);

                mapping->base = ioremap_nocache(mapping->phys, mapping->len);
                if (unlikely(!mapping->base))
                        return -ENXIO;
        } else if (mapping->base) {
                /*
                 * Bump the refcount for an existing mapping
                 */
                kref_get(&mapping->ref);
        }

        clk->mapping = mapping;
out:
        clk->mapped_reg = clk->mapping->base;
        clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
        return 0;
}
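
/*
 * Example of the mapped_reg arithmetic above, with hypothetical addresses:
 * for mapping->phys = 0xffc80000 and clk->enable_reg = 0xffc80024,
 * mapped_reg ends up at mapping->base + 0x24, i.e. the ioremapped alias
 * of the enable register.
 */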

static void clk_destroy_mapping(struct kref *kref)
{
        struct clk_mapping *mapping;

        mapping = container_of(kref, struct clk_mapping, ref);

        iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
        struct clk_mapping *mapping = clk->mapping;

        /* Nothing to do */
        if (mapping == &dummy_mapping)
                goto out;

        kref_put(&mapping->ref, clk_destroy_mapping);
        clk->mapping = NULL;
out:
        clk->mapped_reg = NULL;
}

int clk_register(struct clk *clk)
{
        int ret;

        if (IS_ERR_OR_NULL(clk))
                return -EINVAL;

        /*
         * Bail out early if the clock has already been registered.
         */
        if (clk->node.next || clk->node.prev)
                return 0;

        mutex_lock(&clock_list_sem);

        INIT_LIST_HEAD(&clk->children);
        clk->usecount = 0;

        ret = clk_establish_mapping(clk);
        if (unlikely(ret))
                goto out_unlock;

        if (clk->parent)
                list_add(&clk->sibling, &clk->parent->children);
        else
                list_add(&clk->sibling, &root_clks);

        list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
        if (clk->ops && clk->ops->init)
                clk->ops->init(clk);
#endif

out_unlock:
        mutex_unlock(&clock_list_sem);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
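
/*
 * Registration sketch (the clock below is hypothetical; real instances
 * come from the SoC clock tables):
 *
 *	static struct clk my_clk = {
 *		.parent		= &my_parent_clk,
 *		.enable_reg	= (void __iomem *)MY_MSTP_REG,
 *		.ops		= &my_clk_ops,
 *	};
 *
 *	ret = clk_register(&my_clk);
 *
 * This links the clock onto its parent's children list (or root_clks for
 * parentless clocks), establishes the register mapping, and adds it to
 * clock_list for init/resume handling.
 */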

void clk_unregister(struct clk *clk)
{
        mutex_lock(&clock_list_sem);
        list_del(&clk->sibling);
        list_del(&clk->node);
        clk_teardown_mapping(clk);
        mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node)
                if (clkp->flags & CLK_ENABLE_ON_INIT)
                        clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
        return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        int ret = -EOPNOTSUPP;
        unsigned long flags;

        spin_lock_irqsave(&clock_lock, flags);

        if (likely(clk->ops && clk->ops->set_rate)) {
                ret = clk->ops->set_rate(clk, rate);
                if (ret != 0)
                        goto out_unlock;
        } else {
                clk->rate = rate;
                ret = 0;
        }

        if (clk->ops && clk->ops->recalc)
                clk->rate = clk->ops->recalc(clk);

        propagate_rate(clk);

out_unlock:
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);
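
/*
 * Note the fallback above: clocks without a .set_rate op simply have their
 * cached rate updated. Either way, the (possibly .recalc-corrected) new
 * rate is pushed down the tree via propagate_rate() before the lock is
 * released.
 */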

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!parent || !clk)
                return ret;
        if (clk->parent == parent)
                return 0;

        spin_lock_irqsave(&clock_lock, flags);
        if (clk->usecount == 0) {
                if (clk->ops->set_parent)
                        ret = clk->ops->set_parent(clk, parent);
                else
                        ret = clk_reparent(clk, parent);

                if (ret == 0) {
                        if (clk->ops->recalc)
                                clk->rate = clk->ops->recalc(clk);
                        pr_debug("set parent of %p to %p (new rate %ld)\n",
                                 clk, clk->parent, clk->rate);
                        propagate_rate(clk);
                }
        } else
                ret = -EBUSY;
        spin_unlock_irqrestore(&clock_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
        return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (likely(clk->ops && clk->ops->round_rate)) {
                unsigned long flags, rounded;

                spin_lock_irqsave(&clock_lock, flags);
                rounded = clk->ops->round_rate(clk, rate);
                spin_unlock_irqrestore(&clock_lock, flags);

                return rounded;
        }

        return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);
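
/*
 * A common calling pattern (sketch): round first, then set, so the caller
 * knows the rate the hardware will actually produce:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *	if (rounded > 0)
 *		ret = clk_set_rate(clk, rounded);
 *
 * Clocks without a .round_rate op simply report their current rate back.
 */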

long clk_round_parent(struct clk *clk, unsigned long target,
                      unsigned long *best_freq, unsigned long *parent_freq,
                      unsigned int div_min, unsigned int div_max)
{
        struct cpufreq_frequency_table *freq, *best = NULL;
        unsigned long error = ULONG_MAX, freq_high, freq_low, div;
        struct clk *parent = clk_get_parent(clk);

        if (!parent) {
                *parent_freq = 0;
                *best_freq = clk_round_rate(clk, target);
                return abs(target - *best_freq);
        }

        cpufreq_for_each_valid_entry(freq, parent->freq_table) {
                if (unlikely(freq->frequency / target <= div_min - 1)) {
                        unsigned long freq_max;

                        freq_max = (freq->frequency + div_min / 2) / div_min;
                        if (error > target - freq_max) {
                                error = target - freq_max;
                                best = freq;
                                if (best_freq)
                                        *best_freq = freq_max;
                        }

                        pr_debug("too low freq %u, error %lu\n", freq->frequency,
                                 target - freq_max);

                        if (!error)
                                break;

                        continue;
                }

                if (unlikely(freq->frequency / target >= div_max)) {
                        unsigned long freq_min;

                        freq_min = (freq->frequency + div_max / 2) / div_max;
                        if (error > freq_min - target) {
                                error = freq_min - target;
                                best = freq;
                                if (best_freq)
                                        *best_freq = freq_min;
                        }

                        pr_debug("too high freq %u, error %lu\n", freq->frequency,
                                 freq_min - target);

                        if (!error)
                                break;

                        continue;
                }

                div = freq->frequency / target;
                freq_high = freq->frequency / div;
                freq_low = freq->frequency / (div + 1);

                if (freq_high - target < error) {
                        error = freq_high - target;
                        best = freq;
                        if (best_freq)
                                *best_freq = freq_high;
                }

                if (target - freq_low < error) {
                        error = target - freq_low;
                        best = freq;
                        if (best_freq)
                                *best_freq = freq_low;
                }

                pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
                         freq->frequency, div, freq_high, div + 1, freq_low,
                         *best_freq, best->frequency);

                if (!error)
                        break;
        }

        if (parent_freq)
                *parent_freq = best->frequency;

        return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);
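
/*
 * clk_round_parent() searches the parent's frequency table for the
 * parent-rate/divider pair that best approximates @target within
 * [div_min, div_max]. Worked example with illustrative numbers: for
 * target = 48 MHz, a 132 MHz parent entry gives div = 132 / 48 = 2, so
 * freq_high = 66 MHz and freq_low = 44 MHz; the 44 MHz candidate wins
 * with a 4 MHz error, unless another table entry does better.
 */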

#ifdef CONFIG_PM
static void clks_core_resume(void)
{
        struct clk *clkp;

        list_for_each_entry(clkp, &clock_list, node) {
                if (likely(clkp->usecount && clkp->ops)) {
                        unsigned long rate = clkp->rate;

                        if (likely(clkp->ops->set_parent))
                                clkp->ops->set_parent(clkp,
                                        clkp->parent);
                        if (likely(clkp->ops->set_rate))
                                clkp->ops->set_rate(clkp, rate);
                        else if (likely(clkp->ops->recalc))
                                clkp->rate = clkp->ops->recalc(clkp);
                }
        }
}

static struct syscore_ops clks_syscore_ops = {
        .resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
        register_syscore_ops(&clks_syscore_ops);

        return 0;
}
subsys_initcall(clk_syscore_init);
#endif

static int __init clk_late_init(void)
{
        unsigned long flags;
        struct clk *clk;

        /* disable all clocks with zero use count */
        mutex_lock(&clock_list_sem);
        spin_lock_irqsave(&clock_lock, flags);

        list_for_each_entry(clk, &clock_list, node)
                if (!clk->usecount && clk->ops && clk->ops->disable)
                        clk->ops->disable(clk);

        /* from now on allow clock disable operations */
        allow_disable = 1;

        spin_unlock_irqrestore(&clock_lock, flags);
        mutex_unlock(&clock_list_sem);
        return 0;
}
late_initcall(clk_late_init);