linux/arch/arm/mach-davinci/clock.c
/*
 * Clock and PLL control for DaVinci devices
 *
 * Copyright (C) 2006-2007 Texas Instruments.
 * Copyright (C) 2008-2009 Deep Root Systems, LLC
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>

#include <mach/clock.h>
#include "psc.h"
#include <mach/cputype.h>
#include "clock.h"

static LIST_HEAD(clocks);
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clockfw_lock);

static void __clk_enable(struct clk *clk)
{
        if (clk->parent)
                __clk_enable(clk->parent);
        if (clk->usecount++ == 0) {
                if (clk->flags & CLK_PSC)
                        davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
                                           true, clk->flags);
                else if (clk->clk_enable)
                        clk->clk_enable(clk);
        }
}

static void __clk_disable(struct clk *clk)
{
        if (WARN_ON(clk->usecount == 0))
                return;
        if (--clk->usecount == 0) {
                if (!(clk->flags & CLK_PLL) && (clk->flags & CLK_PSC))
                        davinci_psc_config(clk->domain, clk->gpsc, clk->lpsc,
                                           false, clk->flags);
                else if (clk->clk_disable)
                        clk->clk_disable(clk);
        }
        if (clk->parent)
                __clk_disable(clk->parent);
}

int davinci_clk_reset(struct clk *clk, bool reset)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->flags & CLK_PSC)
                davinci_psc_reset(clk->gpsc, clk->lpsc, reset);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(davinci_clk_reset);

int davinci_clk_reset_assert(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk) || !clk->reset)
                return -EINVAL;

        return clk->reset(clk, true);
}
EXPORT_SYMBOL(davinci_clk_reset_assert);

int davinci_clk_reset_deassert(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk) || !clk->reset)
                return -EINVAL;

        return clk->reset(clk, false);
}
EXPORT_SYMBOL(davinci_clk_reset_deassert);
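
/*
 * Hypothetical sketch (not from this file) of how a driver might use the
 * local-reset helpers for a module whose clock was registered with the
 * PSC_LRST flag; the "dsp" lookup name is illustrative:
 *
 *      struct clk *dsp_clk = clk_get(dev, "dsp");
 *
 *      if (IS_ERR(dsp_clk))
 *              return PTR_ERR(dsp_clk);
 *      davinci_clk_reset_assert(dsp_clk);
 *      clk_enable(dsp_clk);
 *      ...load firmware...
 *      davinci_clk_reset_deassert(dsp_clk);
 */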

int clk_enable(struct clk *clk)
{
        unsigned long flags;

        if (!clk)
                return 0;
        else if (IS_ERR(clk))
                return -EINVAL;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_enable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_enable);

void clk_disable(struct clk *clk)
{
        unsigned long flags;

        if (clk == NULL || IS_ERR(clk))
                return;

        spin_lock_irqsave(&clockfw_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
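
/*
 * A minimal consumer sketch (hypothetical driver code, not part of this
 * file): clk_enable() ungates the module via its LPSC and clk_disable()
 * gates it again once the usecount drops back to zero.
 *
 *      struct clk *clk = clk_get(&pdev->dev, NULL);
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *      clk_enable(clk);
 *      ...use the hardware...
 *      clk_disable(clk);
 *      clk_put(clk);
 */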

unsigned long clk_get_rate(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return 0;

        return clk->rate;
}
EXPORT_SYMBOL(clk_get_rate);

long clk_round_rate(struct clk *clk, unsigned long rate)
{
        if (clk == NULL || IS_ERR(clk))
                return 0;

        if (clk->round_rate)
                return clk->round_rate(clk, rate);

        return clk->rate;
}
EXPORT_SYMBOL(clk_round_rate);

/* Propagate rate to children */
static void propagate_rate(struct clk *root)
{
        struct clk *clk;

        list_for_each_entry(clk, &root->children, childnode) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
}

int clk_set_rate(struct clk *clk, unsigned long rate)
{
        unsigned long flags;
        int ret = -EINVAL;

        if (!clk)
                return 0;
        else if (IS_ERR(clk))
                return -EINVAL;

        if (clk->set_rate)
                ret = clk->set_rate(clk, rate);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (ret == 0) {
                if (clk->recalc)
                        clk->rate = clk->recalc(clk);
                propagate_rate(clk);
        }
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return ret;
}
EXPORT_SYMBOL(clk_set_rate);
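
/*
 * clk_round_rate() and clk_set_rate() are usually paired: ask what the
 * divider can really produce, then program it.  Sketch (the 100 MHz
 * target is arbitrary):
 *
 *      long rounded = clk_round_rate(clk, 100000000);
 *
 *      if (rounded > 0)
 *              clk_set_rate(clk, rounded);
 */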

int clk_set_parent(struct clk *clk, struct clk *parent)
{
        unsigned long flags;

        if (!clk)
                return 0;
        else if (IS_ERR(clk))
                return -EINVAL;

        /* Cannot change parent on enabled clock */
        if (WARN_ON(clk->usecount))
                return -EINVAL;

        mutex_lock(&clocks_mutex);
        if (clk->set_parent) {
                int ret = clk->set_parent(clk, parent);

                if (ret) {
                        mutex_unlock(&clocks_mutex);
                        return ret;
                }
        }
        clk->parent = parent;
        list_del_init(&clk->childnode);
        list_add(&clk->childnode, &clk->parent->children);
        mutex_unlock(&clocks_mutex);

        spin_lock_irqsave(&clockfw_lock, flags);
        if (clk->recalc)
                clk->rate = clk->recalc(clk);
        propagate_rate(clk);
        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(clk_set_parent);

int clk_register(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return -EINVAL;

        if (WARN(clk->parent && !clk->parent->rate,
                        "CLK: %s parent %s has no rate!\n",
                        clk->name, clk->parent->name))
                return -EINVAL;

        INIT_LIST_HEAD(&clk->children);

        mutex_lock(&clocks_mutex);
        list_add_tail(&clk->node, &clocks);
        if (clk->parent) {
                if (clk->set_parent) {
                        int ret = clk->set_parent(clk, clk->parent);

                        if (ret) {
                                mutex_unlock(&clocks_mutex);
                                return ret;
                        }
                }
                list_add_tail(&clk->childnode, &clk->parent->children);
        }
        mutex_unlock(&clocks_mutex);

        /* If rate is already set, use it */
        if (clk->rate)
                return 0;

        /* Else, see if there is a way to calculate it */
        if (clk->recalc)
                clk->rate = clk->recalc(clk);

        /* Otherwise, default to parent rate */
        else if (clk->parent)
                clk->rate = clk->parent->rate;

        return 0;
}
EXPORT_SYMBOL(clk_register);
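
/*
 * SoC code normally defines its clocks statically and hands them to
 * davinci_clk_init() below instead of calling clk_register() directly.
 * A sketch of a typical PSC-gated leaf clock; the parent and LPSC number
 * are illustrative:
 *
 *      static struct clk uart0_clk = {
 *              .name   = "uart0",
 *              .parent = &pll1_sysclk2,
 *              .lpsc   = DAVINCI_LPSC_UART0,
 *      };
 */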

void clk_unregister(struct clk *clk)
{
        if (clk == NULL || IS_ERR(clk))
                return;

        mutex_lock(&clocks_mutex);
        list_del(&clk->node);
        list_del(&clk->childnode);
        mutex_unlock(&clocks_mutex);
}
EXPORT_SYMBOL(clk_unregister);

#ifdef CONFIG_DAVINCI_RESET_CLOCKS
/*
 * Disable any unused clocks left on by the bootloader
 */
int __init davinci_clk_disable_unused(void)
{
        struct clk *ck;

        spin_lock_irq(&clockfw_lock);
        list_for_each_entry(ck, &clocks, node) {
                if (ck->usecount > 0)
                        continue;
                if (!(ck->flags & CLK_PSC))
                        continue;

                /* ignore if in Disabled or SwRstDisable states */
                if (!davinci_psc_is_clk_active(ck->gpsc, ck->lpsc))
                        continue;

                pr_debug("Clocks: disable unused %s\n", ck->name);

                davinci_psc_config(ck->domain, ck->gpsc, ck->lpsc,
                                false, ck->flags);
        }
        spin_unlock_irq(&clockfw_lock);

        return 0;
}
#endif

static unsigned long clk_sysclk_recalc(struct clk *clk)
{
        u32 v, plldiv;
        struct pll_data *pll;
        unsigned long rate = clk->rate;

        /* If this is the PLL base clock, no more calculations needed */
        if (clk->pll_data)
                return rate;

        if (WARN_ON(!clk->parent))
                return rate;

        rate = clk->parent->rate;

        /* Otherwise, the parent must be a PLL */
        if (WARN_ON(!clk->parent->pll_data))
                return rate;

        pll = clk->parent->pll_data;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                rate = pll->input_rate;

        if (!clk->div_reg)
                return rate;

        v = __raw_readl(pll->base + clk->div_reg);
        if (v & PLLDIV_EN) {
                plldiv = (v & pll->div_ratio_mask) + 1;
                if (plldiv)
                        rate /= plldiv;
        }

        return rate;
}

int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
{
        unsigned v;
        struct pll_data *pll;
        unsigned long input;
        unsigned ratio = 0;

        /* If this is the PLL base clock, wrong function to call */
        if (clk->pll_data)
                return -EINVAL;

        /* There must be a parent... */
        if (WARN_ON(!clk->parent))
                return -EINVAL;

        /* ... the parent must be a PLL... */
        if (WARN_ON(!clk->parent->pll_data))
                return -EINVAL;

        /* ... and this clock must have a divider. */
        if (WARN_ON(!clk->div_reg))
                return -EINVAL;

        pll = clk->parent->pll_data;

        input = clk->parent->rate;

        /* If pre-PLL, source clock is before the multiplier and divider(s) */
        if (clk->flags & PRE_PLL)
                input = pll->input_rate;

        if (input > rate) {
                /*
                 * We can afford to provide an output slightly higher than
                 * requested only if the maximum rate supported by the
                 * hardware on this sysclk is known.
                 */
                if (clk->maxrate) {
                        ratio = DIV_ROUND_CLOSEST(input, rate);
                        if (input / ratio > clk->maxrate)
                                ratio = 0;
                }

                if (ratio == 0)
                        ratio = DIV_ROUND_UP(input, rate);

                ratio--;
        }

        if (ratio > pll->div_ratio_mask)
                return -EINVAL;

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        v = __raw_readl(pll->base + clk->div_reg);
        v &= ~pll->div_ratio_mask;
        v |= ratio | PLLDIV_EN;
        __raw_writel(v, pll->base + clk->div_reg);

        v = __raw_readl(pll->base + PLLCMD);
        v |= PLLCMD_GOSET;
        __raw_writel(v, pll->base + PLLCMD);

        do {
                v = __raw_readl(pll->base + PLLSTAT);
        } while (v & PLLSTAT_GOSTAT);

        return 0;
}
EXPORT_SYMBOL(davinci_set_sysclk_rate);
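
/*
 * Worked example of the divider selection above (numbers are illustrative):
 * with a 300 MHz parent and a requested 90 MHz, DIV_ROUND_CLOSEST() picks a
 * divide-by-3 (100 MHz); if that exceeds clk->maxrate, DIV_ROUND_UP() falls
 * back to divide-by-4 (75 MHz), the closest rate not above the request.
 * Either way the value written to the PLLDIV field is the divider minus one,
 * with PLLDIV_EN set.
 */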

static unsigned long clk_leafclk_recalc(struct clk *clk)
{
        if (WARN_ON(!clk->parent))
                return clk->rate;

        return clk->parent->rate;
}

int davinci_simple_set_rate(struct clk *clk, unsigned long rate)
{
        clk->rate = rate;
        return 0;
}

static unsigned long clk_pllclk_recalc(struct clk *clk)
{
        u32 ctrl, mult = 1, prediv = 1, postdiv = 1;
        u8 bypass;
        struct pll_data *pll = clk->pll_data;
        unsigned long rate = clk->rate;

        ctrl = __raw_readl(pll->base + PLLCTL);
        rate = pll->input_rate = clk->parent->rate;

        if (ctrl & PLLCTL_PLLEN) {
                bypass = 0;
                mult = __raw_readl(pll->base + PLLM);
                if (cpu_is_davinci_dm365())
                        mult = 2 * (mult & PLLM_PLLM_MASK);
                else
                        mult = (mult & PLLM_PLLM_MASK) + 1;
        } else
                bypass = 1;

        if (pll->flags & PLL_HAS_PREDIV) {
                prediv = __raw_readl(pll->base + PREDIV);
                if (prediv & PLLDIV_EN)
                        prediv = (prediv & pll->div_ratio_mask) + 1;
                else
                        prediv = 1;
        }

        /* pre-divider is fixed, but (some?) chips won't report that */
        if (cpu_is_davinci_dm355() && pll->num == 1)
                prediv = 8;

        if (pll->flags & PLL_HAS_POSTDIV) {
                postdiv = __raw_readl(pll->base + POSTDIV);
                if (postdiv & PLLDIV_EN)
                        postdiv = (postdiv & pll->div_ratio_mask) + 1;
                else
                        postdiv = 1;
        }

        if (!bypass) {
                rate /= prediv;
                rate *= mult;
                rate /= postdiv;
        }

        pr_debug("PLL%d: input = %lu MHz [ ",
                 pll->num, clk->parent->rate / 1000000);
        if (bypass)
                pr_debug("bypass ");
        if (prediv > 1)
                pr_debug("/ %d ", prediv);
        if (mult > 1)
                pr_debug("* %d ", mult);
        if (postdiv > 1)
                pr_debug("/ %d ", postdiv);
        pr_debug("] --> %lu MHz output.\n", rate / 1000000);

        return rate;
}
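
/*
 * Worked example (illustrative numbers): with a 24 MHz input, PLLM
 * programmed to 24 (so mult = 25), no pre-divider and a post-divider of 1,
 * the trace above reads "PLL1: input = 24 MHz [ * 25 ] --> 600 MHz output."
 * On DM365 the multiplier is 2 * PLLM rather than PLLM + 1.
 */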

/**
 * davinci_set_pllrate - set the output rate of a given PLL.
 *
 * Note: Currently tested to work with OMAP-L138 only.
 *
 * @pll: pll whose rate needs to be changed.
 * @prediv: The pre-divider value. Passing 0 disables the pre-divider.
 * @mult: The multiplier value. Passing 0 leads to multiply-by-one.
 * @postdiv: The post-divider value. Passing 0 disables the post-divider.
 */
int davinci_set_pllrate(struct pll_data *pll, unsigned int prediv,
                                        unsigned int mult, unsigned int postdiv)
{
        u32 ctrl;
        unsigned int locktime;
        unsigned long flags;

        if (pll->base == NULL)
                return -EINVAL;

        /*
         * PLL lock time required per OMAP-L138 datasheet is
         * (2000 * prediv)/sqrt(pllm) OSCIN cycles. We approximate sqrt(pllm)
         * as 4 and the OSCIN rate as 25 MHz.
         */
        if (prediv) {
                locktime = ((2000 * prediv) / 100);
                prediv = (prediv - 1) | PLLDIV_EN;
        } else {
                locktime = PLL_LOCK_TIME;
        }
        if (postdiv)
                postdiv = (postdiv - 1) | PLLDIV_EN;
        if (mult)
                mult = mult - 1;

        /* Protect against simultaneous calls to the PLL setting sequence */
        spin_lock_irqsave(&clockfw_lock, flags);

        ctrl = __raw_readl(pll->base + PLLCTL);

        /* Switch the PLL to bypass mode */
        ctrl &= ~(PLLCTL_PLLENSRC | PLLCTL_PLLEN);
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(PLL_BYPASS_TIME);

        /* Reset and enable PLL */
        ctrl &= ~(PLLCTL_PLLRST | PLLCTL_PLLDIS);
        __raw_writel(ctrl, pll->base + PLLCTL);

        if (pll->flags & PLL_HAS_PREDIV)
                __raw_writel(prediv, pll->base + PREDIV);

        __raw_writel(mult, pll->base + PLLM);

        if (pll->flags & PLL_HAS_POSTDIV)
                __raw_writel(postdiv, pll->base + POSTDIV);

        udelay(PLL_RESET_TIME);

        /* Bring PLL out of reset */
        ctrl |= PLLCTL_PLLRST;
        __raw_writel(ctrl, pll->base + PLLCTL);

        udelay(locktime);

        /* Remove PLL from bypass mode */
        ctrl |= PLLCTL_PLLEN;
        __raw_writel(ctrl, pll->base + PLLCTL);

        spin_unlock_irqrestore(&clockfw_lock, flags);

        return 0;
}
EXPORT_SYMBOL(davinci_set_pllrate);
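
/*
 * Caller sketch (values are illustrative, not board-validated): with a
 * 25 MHz reference, prediv = 1, mult = 24 and postdiv = 1 reprogram the
 * PLL for 600 MHz.  The caller is expected to recalculate the clock tree
 * afterwards, e.g. via clk_set_rate() on the PLL's base clock:
 *
 *      davinci_set_pllrate(pll0_clk.pll_data, 1, 24, 1);
 */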

/**
 * davinci_set_refclk_rate() - Set the reference clock rate
 * @rate:       The new rate.
 *
 * Sets the reference clock rate to a given value. This will most likely
 * result in the entire clock tree getting updated.
 *
 * This is used to support boards which use a reference clock different
 * from the one used by default in the <soc>.c file. The reference clock
 * rate should be updated early in the boot process; ideally soon after
 * the clock tree has been initialized once with the default reference
 * clock rate (davinci_clk_init()).
 *
 * Returns 0 on success, error otherwise.
 */
int davinci_set_refclk_rate(unsigned long rate)
{
        struct clk *refclk;

        refclk = clk_get(NULL, "ref");
        if (IS_ERR(refclk)) {
                pr_err("%s: failed to get reference clock\n", __func__);
                return PTR_ERR(refclk);
        }

        clk_set_rate(refclk, rate);

        clk_put(refclk);

        return 0;
}
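
/*
 * Board sketch (hypothetical function and rate): a board with a non-default
 * oscillator would call this early in its machine init, after the SoC code
 * has set up the clock tree with its default reference rate:
 *
 *      static void __init my_board_init(void)
 *      {
 *              ...soc and serial init...
 *              davinci_set_refclk_rate(24000000);
 *              ...register platform devices...
 *      }
 */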

int __init davinci_clk_init(struct clk_lookup *clocks)
{
        struct clk_lookup *c;
        struct clk *clk;
        size_t num_clocks = 0;

        for (c = clocks; c->clk; c++) {
                clk = c->clk;

                if (!clk->recalc) {

                        /* Check if clock is a PLL */
                        if (clk->pll_data)
                                clk->recalc = clk_pllclk_recalc;

                        /* Else, if it is a PLL-derived clock */
                        else if (clk->flags & CLK_PLL)
                                clk->recalc = clk_sysclk_recalc;

                        /* Otherwise, it is a leaf clock (PSC clock) */
                        else if (clk->parent)
                                clk->recalc = clk_leafclk_recalc;
                }

                if (clk->pll_data) {
                        struct pll_data *pll = clk->pll_data;

                        if (!pll->div_ratio_mask)
                                pll->div_ratio_mask = PLLDIV_RATIO_MASK;

                        if (pll->phys_base && !pll->base) {
                                pll->base = ioremap(pll->phys_base, SZ_4K);
                                WARN_ON(!pll->base);
                        }
                }

                if (clk->recalc)
                        clk->rate = clk->recalc(clk);

                if (clk->lpsc)
                        clk->flags |= CLK_PSC;

                if (clk->flags & PSC_LRST)
                        clk->reset = davinci_clk_reset;

                clk_register(clk);
                num_clocks++;

                /* Turn on clocks that Linux doesn't otherwise manage */
                if (clk->flags & ALWAYS_ENABLED)
                        clk_enable(clk);
        }

        clkdev_add_table(clocks, num_clocks);

        return 0;
}
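
/*
 * The table handed to davinci_clk_init() is a clkdev lookup array; a sketch,
 * assuming the CLK() initializer from this directory's clock.h and clocks
 * defined as in the example above (entries are illustrative).  The loop
 * above stops at the first entry whose ->clk is NULL:
 *
 *      static struct clk_lookup my_soc_clks[] = {
 *              CLK(NULL, "ref", &ref_clk),
 *              CLK(NULL, "pll1", &pll1_clk),
 *              CLK("serial8250.0", NULL, &uart0_clk),
 *              CLK(NULL, NULL, NULL),
 *      };
 *
 *      davinci_clk_init(my_soc_clks);
 */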

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define CLKNAME_MAX     10              /* longest clock name */
#define NEST_DELTA      2
#define NEST_MAX        4

static void
dump_clock(struct seq_file *s, unsigned nest, struct clk *parent)
{
        char            *state;
        char            buf[CLKNAME_MAX + NEST_DELTA * NEST_MAX];
        struct clk      *clk;
        unsigned        i;

        if (parent->flags & CLK_PLL)
                state = "pll";
        else if (parent->flags & CLK_PSC)
                state = "psc";
        else
                state = "";

        /* <nest spaces> name <pad to end> */
        memset(buf, ' ', sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = 0;
        i = strlen(parent->name);
        memcpy(buf + nest, parent->name,
                        min(i, (unsigned)(sizeof(buf) - 1 - nest)));

        seq_printf(s, "%s users=%2d %-3s %9ld Hz\n",
                   buf, parent->usecount, state, clk_get_rate(parent));
        /* REVISIT show device associations too */

        /* cost is now small, but not linear... */
        list_for_each_entry(clk, &parent->children, childnode) {
                dump_clock(s, nest + NEST_DELTA, clk);
        }
}
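
/*
 * The resulting /sys/kernel/debug/davinci_clocks listing looks roughly like
 * this (names, counts and rates are illustrative):
 *
 *      ref             users= 1      24000000 Hz
 *        pll1          users= 1 pll 600000000 Hz
 *          pll1_sysclk2 users= 2 pll 300000000 Hz
 */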

static int davinci_ck_show(struct seq_file *m, void *v)
{
        struct clk *clk;

        /*
         * Show the clock tree; we trust that nonzero usecounts equate to
         * PSC enables...
         */
        mutex_lock(&clocks_mutex);
        list_for_each_entry(clk, &clocks, node)
                if (!clk->parent)
                        dump_clock(m, 0, clk);
        mutex_unlock(&clocks_mutex);

        return 0;
}

static int davinci_ck_open(struct inode *inode, struct file *file)
{
        return single_open(file, davinci_ck_show, NULL);
}

static const struct file_operations davinci_ck_operations = {
        .open           = davinci_ck_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init davinci_clk_debugfs_init(void)
{
        debugfs_create_file("davinci_clocks", S_IFREG | S_IRUGO, NULL, NULL,
                                                &davinci_ck_operations);
        return 0;
}
device_initcall(davinci_clk_debugfs_init);
#endif /* CONFIG_DEBUG_FS */