linux/drivers/sh/clk/core.c
/*
 * SuperH clock framework
 *
 *  Copyright (C) 2005 - 2010  Paul Mundt
 *
 * This clock framework is derived from the OMAP version by:
 *
 *	Copyright (C) 2004 - 2008 Nokia Corporation
 *	Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "clock: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/syscore_ops.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>

static LIST_HEAD(clock_list);
static DEFINE_SPINLOCK(clock_lock);
static DEFINE_MUTEX(clock_list_sem);

/* clock disable operations are not passed on to hardware during boot */
static int allow_disable;

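/**
 * clk_rate_table_build - fill in a cpufreq frequency table for a clock
 * @clk: clock the table describes; @nr_freqs is recorded in it for
 *	later table rounding
 * @freq_table: table to populate; must have room for @nr_freqs entries
 *	plus the CPUFREQ_TABLE_END terminator
 * @nr_freqs: number of candidate frequencies
 * @src_table: divisor/multiplier pairs applied to the parent clock rate
 * @bitmap: optional validity bitmap; entries with a clear bit are marked
 *	CPUFREQ_ENTRY_INVALID
 *
 * Entry i is computed as parent->rate * multipliers[i] / divisors[i],
 * with missing or zero divisors/multipliers treated as invalid.
 */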
void clk_rate_table_build(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  int nr_freqs,
			  struct clk_div_mult_table *src_table,
			  unsigned long *bitmap)
{
	unsigned long mult, div;
	unsigned long freq;
	int i;

	clk->nr_freqs = nr_freqs;

	for (i = 0; i < nr_freqs; i++) {
		div = 1;
		mult = 1;

		if (src_table->divisors && i < src_table->nr_divisors)
			div = src_table->divisors[i];

		if (src_table->multipliers && i < src_table->nr_multipliers)
			mult = src_table->multipliers[i];

		if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
			freq = CPUFREQ_ENTRY_INVALID;
		else
			freq = clk->parent->rate * mult / div;

		freq_table[i].index = i;
		freq_table[i].frequency = freq;
	}

	/* Termination entry */
	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;
}

struct clk_rate_round_data;

struct clk_rate_round_data {
	unsigned long rate;
	unsigned int min, max;
	long (*func)(unsigned int, struct clk_rate_round_data *);
	void *arg;
};

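/*
 * Walk candidate positions min..max, evaluating func() for each. The
 * if/else construction skips entries for which func() returns 0 (i.e.
 * invalid table slots) while keeping the caller's loop body attached
 * to the else branch.
 */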
#define for_each_frequency(pos, r, freq)			\
	for (pos = r->min, freq = r->func(pos, r);		\
	     pos <= r->max; pos++, freq = r->func(pos, r))	\
		if (unlikely(freq == 0))			\
			;					\
		else

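/*
 * Find the candidate frequency closest to rounder->rate. The result is
 * also clamped to the range covered by the candidates: requests above
 * the highest (or below the lowest) supported frequency round to that
 * extreme.
 */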
static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
{
	unsigned long rate_error, rate_error_prev = ~0UL;
	unsigned long highest, lowest, freq;
	long rate_best_fit = -ENOENT;
	int i;

	highest = 0;
	lowest = ~0UL;

	for_each_frequency(i, rounder, freq) {
		if (freq > highest)
			highest = freq;
		if (freq < lowest)
			lowest = freq;

		rate_error = abs(freq - rounder->rate);
		if (rate_error < rate_error_prev) {
			rate_best_fit = freq;
			rate_error_prev = rate_error;
		}

		if (rate_error == 0)
			break;
	}

	if (rounder->rate >= highest)
		rate_best_fit = highest;
	if (rounder->rate <= lowest)
		rate_best_fit = lowest;

	return rate_best_fit;
}

static long clk_rate_table_iter(unsigned int pos,
				struct clk_rate_round_data *rounder)
{
	struct cpufreq_frequency_table *freq_table = rounder->arg;
	unsigned long freq = freq_table[pos].frequency;

	if (freq == CPUFREQ_ENTRY_INVALID)
		freq = 0;

	return freq;
}

long clk_rate_table_round(struct clk *clk,
			  struct cpufreq_frequency_table *freq_table,
			  unsigned long rate)
{
	struct clk_rate_round_data table_round = {
		.min	= 0,
		.max	= clk->nr_freqs - 1,
		.func	= clk_rate_table_iter,
		.arg	= freq_table,
		.rate	= rate,
	};

	if (clk->nr_freqs < 1)
		return -ENOSYS;

	return clk_rate_round_helper(&table_round);
}

static long clk_rate_div_range_iter(unsigned int pos,
				    struct clk_rate_round_data *rounder)
{
	return clk_get_rate(rounder->arg) / pos;
}

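/*
 * Round to the rate nearest to the target that the parent rate divided
 * by an integer divider in [div_min, div_max] can produce. A minimal
 * usage sketch (hypothetical driver code, assuming a 1..64 divider):
 *
 *	static long div_clk_round_rate(struct clk *clk, unsigned long rate)
 *	{
 *		return clk_rate_div_range_round(clk, 1, 64, rate);
 *	}
 */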
long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
			      unsigned int div_max, unsigned long rate)
{
	struct clk_rate_round_data div_range_round = {
		.min	= div_min,
		.max	= div_max,
		.func	= clk_rate_div_range_iter,
		.arg	= clk_get_parent(clk),
		.rate	= rate,
	};

	return clk_rate_round_helper(&div_range_round);
}

int clk_rate_table_find(struct clk *clk,
			struct cpufreq_frequency_table *freq_table,
			unsigned long rate)
{
	int i;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned long freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		if (freq == rate)
			return i;
	}

	return -ENOENT;
}

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct clk *clk)
{
	return clk->parent ? clk->parent->rate : 0;
}

int clk_reparent(struct clk *child, struct clk *parent)
{
	list_del_init(&child->sibling);
	if (parent)
		list_add(&child->sibling, &parent->children);
	child->parent = parent;

	/*
	 * now do the debugfs renaming to reattach the child
	 * to the proper parent
	 */

	return 0;
}

/* Propagate rate to children */
void propagate_rate(struct clk *tclk)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &tclk->children, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);

		propagate_rate(clkp);
	}
}

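/*
 * Enable/disable bookkeeping: each clock carries a usecount protected by
 * clock_lock. The first enable propagates up the parent chain before the
 * clock itself is enabled; the last disable unwinds in the opposite
 * order. Hardware disable callbacks are suppressed until clk_late_init()
 * sets allow_disable, so boot-time clocks stay on through early init.
 */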
static void __clk_disable(struct clk *clk)
{
	if (WARN(!clk->usecount, "Trying to disable clock %p with 0 usecount\n",
		 clk))
		return;

	if (!(--clk->usecount)) {
		if (likely(allow_disable && clk->ops && clk->ops->disable))
			clk->ops->disable(clk);
		if (likely(clk->parent))
			__clk_disable(clk->parent);
	}
}

void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (!clk)
		return;

	spin_lock_irqsave(&clock_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
	int ret = 0;

	if (clk->usecount++ == 0) {
		if (clk->parent) {
			ret = __clk_enable(clk->parent);
			if (unlikely(ret))
				goto err;
		}

		if (clk->ops && clk->ops->enable) {
			ret = clk->ops->enable(clk);
			if (ret) {
				if (clk->parent)
					__clk_disable(clk->parent);
				goto err;
			}
		}
	}

	return ret;
err:
	clk->usecount--;
	return ret;
}

int clk_enable(struct clk *clk)
{
	unsigned long flags;
	int ret;

	if (!clk)
		return -EINVAL;

	spin_lock_irqsave(&clock_lock, flags);
	ret = __clk_enable(clk);
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);
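
/*
 * Consumer-side usage follows the generic clk API. A minimal sketch
 * (hypothetical driver code; "peripheral_clk" is an illustrative name):
 *
 *	struct clk *clk = clk_get(dev, "peripheral_clk");
 *
 *	if (!IS_ERR(clk)) {
 *		clk_enable(clk);
 *		...
 *		clk_disable(clk);
 *		clk_put(clk);
 *	}
 */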

static LIST_HEAD(root_clks);

/**
 * recalculate_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which, if the
 * clock's .recalc is set correctly, should also propagate their rates.
 * Called at init.
 */
void recalculate_root_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &root_clks, sibling) {
		if (clkp->ops && clkp->ops->recalc)
			clkp->rate = clkp->ops->recalc(clkp);
		propagate_rate(clkp);
	}
}

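/*
 * Register-window mappings: a clock with a populated clk_mapping (phys +
 * len) gets its window ioremapped once, and the mapping is then shared,
 * via kref, by every descendant that does not provide one of its own.
 * Root clocks without a mapping are given dummy_mapping so the teardown
 * path can tell "never mapped" apart from "shared mapping".
 */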
static struct clk_mapping dummy_mapping;

static struct clk *lookup_root_clock(struct clk *clk)
{
	while (clk->parent)
		clk = clk->parent;

	return clk;
}

static int clk_establish_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/*
	 * Propagate mappings.
	 */
	if (!mapping) {
		struct clk *clkp;

		/*
		 * dummy mapping for root clocks with no specified ranges
		 */
		if (!clk->parent) {
			clk->mapping = &dummy_mapping;
			return 0;
		}

		/*
		 * If we're on a child clock and it provides no mapping of its
		 * own, inherit the mapping from its root clock.
		 */
		clkp = lookup_root_clock(clk);
		mapping = clkp->mapping;
		BUG_ON(!mapping);
	}

	/*
	 * Establish initial mapping.
	 */
	if (!mapping->base && mapping->phys) {
		kref_init(&mapping->ref);

		mapping->base = ioremap_nocache(mapping->phys, mapping->len);
		if (unlikely(!mapping->base))
			return -ENXIO;
	} else if (mapping->base) {
		/*
		 * Bump the refcount for an existing mapping
		 */
		kref_get(&mapping->ref);
	}

	clk->mapping = mapping;
	return 0;
}

static void clk_destroy_mapping(struct kref *kref)
{
	struct clk_mapping *mapping;

	mapping = container_of(kref, struct clk_mapping, ref);

	iounmap(mapping->base);
}

static void clk_teardown_mapping(struct clk *clk)
{
	struct clk_mapping *mapping = clk->mapping;

	/* Nothing to do */
	if (mapping == &dummy_mapping)
		return;

	kref_put(&mapping->ref, clk_destroy_mapping);
	clk->mapping = NULL;
}

int clk_register(struct clk *clk)
{
	int ret;

	if (IS_ERR_OR_NULL(clk))
		return -EINVAL;

	/*
	 * trap out already registered clocks
	 */
	if (clk->node.next || clk->node.prev)
		return 0;

	mutex_lock(&clock_list_sem);

	INIT_LIST_HEAD(&clk->children);
	clk->usecount = 0;

	ret = clk_establish_mapping(clk);
	if (unlikely(ret))
		goto out_unlock;

	if (clk->parent)
		list_add(&clk->sibling, &clk->parent->children);
	else
		list_add(&clk->sibling, &root_clks);

	list_add(&clk->node, &clock_list);

#ifdef CONFIG_SH_CLK_CPG_LEGACY
	if (clk->ops && clk->ops->init)
		clk->ops->init(clk);
#endif

out_unlock:
	mutex_unlock(&clock_list_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_register);
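
/*
 * Clocks are typically defined statically in SoC or board code and then
 * handed to clk_register(). A minimal sketch (hypothetical names; the
 * ops structure would come from the SoC's clock setup):
 *
 *	static struct clk example_clk = {
 *		.parent	= &example_parent_clk,
 *		.ops	= &example_clk_ops,
 *		.flags	= CLK_ENABLE_ON_INIT,
 *	};
 *
 *	ret = clk_register(&example_clk);
 */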

void clk_unregister(struct clk *clk)
{
	mutex_lock(&clock_list_sem);
	list_del(&clk->sibling);
	list_del(&clk->node);
	clk_teardown_mapping(clk);
	mutex_unlock(&clock_list_sem);
}
EXPORT_SYMBOL_GPL(clk_unregister);

void clk_enable_init_clocks(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node)
		if (clkp->flags & CLK_ENABLE_ON_INIT)
			clk_enable(clkp);
}

unsigned long clk_get_rate(struct clk *clk)
{
	return clk->rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

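/*
 * Set a clock's rate. If the clock provides a set_rate op it is used;
 * otherwise the requested rate is simply stored. On success the rate is
 * re-derived through recalc where available and then propagated down to
 * all children.
 */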
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret = -EOPNOTSUPP;
	unsigned long flags;

	spin_lock_irqsave(&clock_lock, flags);

	if (likely(clk->ops && clk->ops->set_rate)) {
		ret = clk->ops->set_rate(clk, rate);
		if (ret != 0)
			goto out_unlock;
	} else {
		clk->rate = rate;
		ret = 0;
	}

	if (clk->ops && clk->ops->recalc)
		clk->rate = clk->ops->recalc(clk);

	propagate_rate(clk);

out_unlock:
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

int clk_set_parent(struct clk *clk, struct clk *parent)
{
	unsigned long flags;
	int ret = -EINVAL;

	if (!parent || !clk)
		return ret;
	if (clk->parent == parent)
		return 0;

	spin_lock_irqsave(&clock_lock, flags);
	if (clk->usecount == 0) {
		if (clk->ops->set_parent)
			ret = clk->ops->set_parent(clk, parent);
		else
			ret = clk_reparent(clk, parent);

		if (ret == 0) {
			if (clk->ops->recalc)
				clk->rate = clk->ops->recalc(clk);
			pr_debug("set parent of %p to %p (new rate %ld)\n",
				 clk, clk->parent, clk->rate);
			propagate_rate(clk);
		}
	} else
		ret = -EBUSY;
	spin_unlock_irqrestore(&clock_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

struct clk *clk_get_parent(struct clk *clk)
{
	return clk->parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
	if (likely(clk->ops && clk->ops->round_rate)) {
		unsigned long flags, rounded;

		spin_lock_irqsave(&clock_lock, flags);
		rounded = clk->ops->round_rate(clk, rate);
		spin_unlock_irqrestore(&clock_lock, flags);

		return rounded;
	}

	return clk_get_rate(clk);
}
EXPORT_SYMBOL_GPL(clk_round_rate);

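/**
 * clk_round_parent - find the best parent rate/divider pair for a target
 * @clk: clock whose parent is being searched
 * @target: desired clock rate
 * @best_freq: filled with the closest achievable rate
 * @parent_freq: filled with the parent frequency that achieves it
 * @div_min: smallest divider to consider
 * @div_max: largest divider to consider
 *
 * Walks the parent's cpufreq frequency table and, for each valid entry,
 * checks the rates reachable with dividers in [@div_min, @div_max],
 * tracking the combination with the smallest error against @target.
 * Returns the remaining rate error.
 */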
long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max)
{
	struct cpufreq_frequency_table *freq, *best = NULL;
	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
	struct clk *parent = clk_get_parent(clk);

	if (!parent) {
		*parent_freq = 0;
		*best_freq = clk_round_rate(clk, target);
		return abs(target - *best_freq);
	}

	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
	     freq++) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		if (unlikely(freq->frequency / target <= div_min - 1)) {
			unsigned long freq_max;

			freq_max = (freq->frequency + div_min / 2) / div_min;
			if (error > target - freq_max) {
				error = target - freq_max;
				best = freq;
				if (best_freq)
					*best_freq = freq_max;
			}

			pr_debug("too low freq %u, error %lu\n", freq->frequency,
				 target - freq_max);

			if (!error)
				break;

			continue;
		}

		if (unlikely(freq->frequency / target >= div_max)) {
			unsigned long freq_min;

			freq_min = (freq->frequency + div_max / 2) / div_max;
			if (error > freq_min - target) {
				error = freq_min - target;
				best = freq;
				if (best_freq)
					*best_freq = freq_min;
			}

			pr_debug("too high freq %u, error %lu\n", freq->frequency,
				 freq_min - target);

			if (!error)
				break;

			continue;
		}

		div = freq->frequency / target;
		freq_high = freq->frequency / div;
		freq_low = freq->frequency / (div + 1);

		if (freq_high - target < error) {
			error = freq_high - target;
			best = freq;
			if (best_freq)
				*best_freq = freq_high;
		}

		if (target - freq_low < error) {
			error = target - freq_low;
			best = freq;
			if (best_freq)
				*best_freq = freq_low;
		}

		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
			 freq->frequency, div, freq_high, div + 1, freq_low,
			 *best_freq, best->frequency);

		if (!error)
			break;
	}

	if (parent_freq)
		*parent_freq = best->frequency;

	return error;
}
EXPORT_SYMBOL_GPL(clk_round_parent);

#ifdef CONFIG_PM
static void clks_core_resume(void)
{
	struct clk *clkp;

	list_for_each_entry(clkp, &clock_list, node) {
		if (likely(clkp->usecount && clkp->ops)) {
			unsigned long rate = clkp->rate;

			if (likely(clkp->ops->set_parent))
				clkp->ops->set_parent(clkp,
					clkp->parent);
			if (likely(clkp->ops->set_rate))
				clkp->ops->set_rate(clkp, rate);
			else if (likely(clkp->ops->recalc))
				clkp->rate = clkp->ops->recalc(clkp);
		}
	}
}

static struct syscore_ops clks_syscore_ops = {
	.resume = clks_core_resume,
};

static int __init clk_syscore_init(void)
{
	register_syscore_ops(&clks_syscore_ops);

	return 0;
}
subsys_initcall(clk_syscore_init);
#endif

/*
 *	debugfs support to trace clock tree hierarchy and attributes
 */
static struct dentry *clk_debugfs_root;

static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;
	char s[255];
	char *p = s;

	p += sprintf(p, "%p", c);
	d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dentry = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dentry);
	return err;
}

static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dentry) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dentry) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}

static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clock_list, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);

static int __init clk_late_init(void)
{
	unsigned long flags;
	struct clk *clk;

	/* disable all clocks with zero use count */
	mutex_lock(&clock_list_sem);
	spin_lock_irqsave(&clock_lock, flags);

	list_for_each_entry(clk, &clock_list, node)
		if (!clk->usecount && clk->ops && clk->ops->disable)
			clk->ops->disable(clk);

	/* from now on allow clock disable operations */
	allow_disable = 1;

	spin_unlock_irqrestore(&clock_lock, flags);
	mutex_unlock(&clock_list_sem);
	return 0;
}
late_initcall(clk_late_init);