linux/drivers/clk/clk-xgene.c
/*
 * clk-xgene.c - AppliedMicro X-Gene Clock Interface
 *
 * Copyright (c) 2013, Applied Micro Circuits Corporation
 * Author: Loc Ho <lho@apm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 *
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>

/* Register SCU_PCPPLL bit fields */
#define N_DIV_RD(src)                   ((src) & 0x000001ff)
#define SC_N_DIV_RD(src)                ((src) & 0x0000007f)
#define SC_OUTDIV2(src)                 (((src) & 0x00000100) >> 8)

/* Register SCU_SOCPLL bit fields */
#define CLKR_RD(src)                    (((src) & 0x07000000) >> 24)
#define CLKOD_RD(src)                   (((src) & 0x00300000) >> 20)
#define REGSPEC_RESET_F1_MASK           0x00010000
#define CLKF_RD(src)                    (((src) & 0x000001ff))

#define XGENE_CLK_DRIVER_VER            "0.1"

static DEFINE_SPINLOCK(clk_lock);

static inline u32 xgene_clk_read(void __iomem *csr)
{
        return readl_relaxed(csr);
}

static inline void xgene_clk_write(u32 data, void __iomem *csr)
{
        writel_relaxed(data, csr);
}

/* PLL Clock */
enum xgene_pll_type {
        PLL_TYPE_PCP = 0,
        PLL_TYPE_SOC = 1,
};

struct xgene_clk_pll {
        struct clk_hw   hw;
        void __iomem    *reg;
        spinlock_t      *lock;
        u32             pll_offset;
        enum xgene_pll_type     type;
        int             version;
};

#define to_xgene_clk_pll(_hw) container_of(_hw, struct xgene_clk_pll, hw)

static int xgene_clk_pll_is_enabled(struct clk_hw *hw)
{
        struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
        u32 data;

        data = xgene_clk_read(pllclk->reg + pllclk->pll_offset);
        pr_debug("%s pll %s\n", clk_hw_get_name(hw),
                data & REGSPEC_RESET_F1_MASK ? "disabled" : "enabled");

        return data & REGSPEC_RESET_F1_MASK ? 0 : 1;
}

static unsigned long xgene_clk_pll_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
{
        struct xgene_clk_pll *pllclk = to_xgene_clk_pll(hw);
        unsigned long fref;
        unsigned long fvco;
        u32 pll;
        u32 nref;
        u32 nout;
        u32 nfb;

        pll = xgene_clk_read(pllclk->reg + pllclk->pll_offset);

        if (pllclk->version <= 1) {
                if (pllclk->type == PLL_TYPE_PCP) {
                        /*
                         * PLL VCO = Reference clock * NF
                         * PCP PLL = PLL_VCO / 2
                         */
                        nout = 2;
                        fvco = parent_rate * (N_DIV_RD(pll) + 4);
                } else {
                        /*
                         * Fref = Reference Clock / NREF;
                         * Fvco = Fref * NFB;
                         * Fout = Fvco / NOUT;
                         */
                        nref = CLKR_RD(pll) + 1;
                        nout = CLKOD_RD(pll) + 1;
                        nfb = CLKF_RD(pll);
                        fref = parent_rate / nref;
                        fvco = fref * nfb;
                }
        } else {
                /*
                 * fvco = Reference clock * FBDIVC
                 * PLL freq = fvco / NOUT
                 */
                nout = SC_OUTDIV2(pll) ? 2 : 3;
                fvco = parent_rate * SC_N_DIV_RD(pll);
        }
        pr_debug("%s pll recalc rate %ld parent %ld version %d\n",
                 clk_hw_get_name(hw), fvco / nout, parent_rate,
                 pllclk->version);

        return fvco / nout;
}
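
/*
 * Worked example (hypothetical register values): a v1 SOC PLL fed by a
 * 100 MHz reference with CLKR = 0, CLKOD = 1 and CLKF = 50 gives
 * nref = 1, nout = 2 and nfb = 50, so fref = 100 MHz, fvco = 5 GHz and
 * the reported PLL rate is fvco / nout = 2.5 GHz.
 */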

static const struct clk_ops xgene_clk_pll_ops = {
        .is_enabled = xgene_clk_pll_is_enabled,
        .recalc_rate = xgene_clk_pll_recalc_rate,
};

static struct clk *xgene_register_clk_pll(struct device *dev,
        const char *name, const char *parent_name,
        unsigned long flags, void __iomem *reg, u32 pll_offset,
        u32 type, spinlock_t *lock, int version)
{
        struct xgene_clk_pll *apmclk;
        struct clk *clk;
        struct clk_init_data init;

        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
        if (!apmclk) {
                pr_err("%s: could not allocate APM clk\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        init.name = name;
        init.ops = &xgene_clk_pll_ops;
        init.flags = flags;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        apmclk->version = version;
        apmclk->reg = reg;
        apmclk->lock = lock;
        apmclk->pll_offset = pll_offset;
        apmclk->type = type;
        apmclk->hw.init = &init;

        /* Register the clock */
        clk = clk_register(dev, &apmclk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(apmclk);
                /* propagate the ERR_PTR so the caller's IS_ERR() check fires */
                return clk;
        }
        return clk;
}

static int xgene_pllclk_version(struct device_node *np)
{
        if (of_device_is_compatible(np, "apm,xgene-socpll-clock"))
                return 1;
        if (of_device_is_compatible(np, "apm,xgene-pcppll-clock"))
                return 1;
        return 2;
}

static void xgene_pllclk_init(struct device_node *np, enum xgene_pll_type pll_type)
{
        const char *clk_name = np->full_name;
        struct clk *clk;
        void __iomem *reg;
        int version = xgene_pllclk_version(np);

        reg = of_iomap(np, 0);
        if (reg == NULL) {
                pr_err("Unable to map CSR register for %s\n", np->full_name);
                return;
        }
        of_property_read_string(np, "clock-output-names", &clk_name);
        clk = xgene_register_clk_pll(NULL,
                        clk_name, of_clk_get_parent_name(np, 0),
                        0, reg, 0, pll_type, &clk_lock,
                        version);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
                clk_register_clkdev(clk, clk_name, NULL);
                pr_debug("Add %s clock PLL\n", clk_name);
        }
}

static void xgene_socpllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_SOC);
}

static void xgene_pcppllclk_init(struct device_node *np)
{
        xgene_pllclk_init(np, PLL_TYPE_PCP);
}
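
/*
 * The PLL init path above only consumes a single reg window, an optional
 * parent clock and an optional "clock-output-names" override.  An
 * illustrative SoC PLL node (the address, cell counts and the refclk label
 * are made-up examples) could look like:
 *
 *      socpll: socpll@17000120 {
 *              compatible = "apm,xgene-socpll-clock";
 *              #clock-cells = <1>;
 *              clocks = <&refclk 0>;
 *              reg = <0x0 0x17000120 0x0 0x1000>;
 *              clock-output-names = "socpll";
 *      };
 */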

/**
 * struct xgene_clk_pmd - PMD clock
 *
 * @hw:         handle between common and hardware-specific interfaces
 * @reg:        register containing the fractional scale multiplier (scaler)
 * @shift:      shift to the unit bit field
 * @mask:       mask to the unit bit field
 * @denom:      1/denominator unit
 * @flags:      PMD clock flags
 * @lock:       register lock
 *
 * Flags:
 * XGENE_CLK_PMD_SCALE_INVERTED - By default the scaler is the value read
 *      from the register plus one. For example,
 *              0 for (0 + 1) / denom,
 *              1 for (1 + 1) / denom and so on.
 *      If this flag is set, it is
 *              0 for (denom - 0) / denom,
 *              1 for (denom - 1) / denom and so on.
 */
struct xgene_clk_pmd {
        struct clk_hw   hw;
        void __iomem    *reg;
        u8              shift;
        u32             mask;
        u64             denom;
        u32             flags;
        spinlock_t      *lock;
};

#define to_xgene_clk_pmd(_hw) container_of(_hw, struct xgene_clk_pmd, hw)

#define XGENE_CLK_PMD_SCALE_INVERTED    BIT(0)
#define XGENE_CLK_PMD_SHIFT             8
#define XGENE_CLK_PMD_WIDTH             3

static unsigned long xgene_clk_pmd_recalc_rate(struct clk_hw *hw,
                                               unsigned long parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        unsigned long flags = 0;
        u64 ret, scale;
        u32 val;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        ret = (u64)parent_rate;

        scale = (val & fd->mask) >> fd->shift;
        if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
                scale = fd->denom - scale;
        else
                scale++;

        /* freq = parent_rate * scaler / denom */
        do_div(ret, fd->denom);
        ret *= scale;
        if (ret == 0)
                ret = (u64)parent_rate;

        return ret;
}
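
/*
 * Worked example (hypothetical values): xgene_pmdclk_init() below registers
 * this clock with a 3-bit scaler field (denom = 8) and the inverted-scale
 * flag.  With a 250 MHz parent and a scaler field reading 3, the code above
 * computes scale = 8 - 3 = 5 and returns 250 MHz / 8 * 5 = 156.25 MHz.
 */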

static long xgene_clk_pmd_round_rate(struct clk_hw *hw, unsigned long rate,
                                     unsigned long *parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        u64 ret, scale;

        if (!rate || rate >= *parent_rate)
                return *parent_rate;

        /* freq = parent_rate * scaler / denom */
        ret = rate * fd->denom;
        scale = DIV_ROUND_UP_ULL(ret, *parent_rate);

        ret = (u64)*parent_rate * scale;
        do_div(ret, fd->denom);

        return ret;
}

static int xgene_clk_pmd_set_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long parent_rate)
{
        struct xgene_clk_pmd *fd = to_xgene_clk_pmd(hw);
        unsigned long flags = 0;
        u64 scale, ret;
        u32 val;

        /*
         * Compute the scaler:
         *
         * freq = parent_rate * scaler / denom, or
         * scaler = freq * denom / parent_rate
         */
        ret = rate * fd->denom;
        scale = DIV_ROUND_UP_ULL(ret, (u64)parent_rate);

        /* Check if inverted */
        if (fd->flags & XGENE_CLK_PMD_SCALE_INVERTED)
                scale = fd->denom - scale;
        else
                scale--;

        if (fd->lock)
                spin_lock_irqsave(fd->lock, flags);
        else
                __acquire(fd->lock);

        val = clk_readl(fd->reg);
        val &= ~fd->mask;
        val |= (scale << fd->shift);
        clk_writel(val, fd->reg);

        if (fd->lock)
                spin_unlock_irqrestore(fd->lock, flags);
        else
                __release(fd->lock);

        return 0;
}
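
/*
 * Worked example (hypothetical values): requesting 150 MHz from a 250 MHz
 * parent with denom = 8 gives scale = DIV_ROUND_UP(150 * 8, 250) = 5; with
 * the inverted-scale flag the field written back is 8 - 5 = 3, which
 * xgene_clk_pmd_recalc_rate() reads back as 250 MHz / 8 * 5 = 156.25 MHz,
 * matching what xgene_clk_pmd_round_rate() reports for the same request.
 */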

static const struct clk_ops xgene_clk_pmd_ops = {
        .recalc_rate = xgene_clk_pmd_recalc_rate,
        .round_rate = xgene_clk_pmd_round_rate,
        .set_rate = xgene_clk_pmd_set_rate,
};

static struct clk *
xgene_register_clk_pmd(struct device *dev,
                       const char *name, const char *parent_name,
                       unsigned long flags, void __iomem *reg, u8 shift,
                       u8 width, u64 denom, u32 clk_flags, spinlock_t *lock)
{
        struct xgene_clk_pmd *fd;
        struct clk_init_data init;
        struct clk *clk;

        fd = kzalloc(sizeof(*fd), GFP_KERNEL);
        if (!fd)
                return ERR_PTR(-ENOMEM);

        init.name = name;
        init.ops = &xgene_clk_pmd_ops;
        init.flags = flags;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        fd->reg = reg;
        fd->shift = shift;
        fd->mask = (BIT(width) - 1) << shift;
        fd->denom = denom;
        fd->flags = clk_flags;
        fd->lock = lock;
        fd->hw.init = &init;

        clk = clk_register(dev, &fd->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(fd);
                /* propagate the ERR_PTR so the caller's IS_ERR() check fires */
                return clk;
        }

        return clk;
}

static void xgene_pmdclk_init(struct device_node *np)
{
        const char *clk_name = np->full_name;
        void __iomem *csr_reg;
        struct resource res;
        struct clk *clk;
        u64 denom;
        u32 flags = 0;
        int rc;

        /* Check if the entry is disabled */
        if (!of_device_is_available(np))
                return;

        /* Parse the DTS register for resource */
        rc = of_address_to_resource(np, 0, &res);
        if (rc != 0) {
                pr_err("no DTS register for %s\n", np->full_name);
                return;
        }
        csr_reg = of_iomap(np, 0);
        if (!csr_reg) {
                pr_err("Unable to map resource for %s\n", np->full_name);
                return;
        }
        of_property_read_string(np, "clock-output-names", &clk_name);

        denom = BIT(XGENE_CLK_PMD_WIDTH);
        flags |= XGENE_CLK_PMD_SCALE_INVERTED;

        clk = xgene_register_clk_pmd(NULL, clk_name,
                                     of_clk_get_parent_name(np, 0), 0,
                                     csr_reg, XGENE_CLK_PMD_SHIFT,
                                     XGENE_CLK_PMD_WIDTH, denom,
                                     flags, &clk_lock);
        if (!IS_ERR(clk)) {
                of_clk_add_provider(np, of_clk_src_simple_get, clk);
                clk_register_clkdev(clk, clk_name, NULL);
                pr_debug("Add %s clock\n", clk_name);
        } else {
                if (csr_reg)
                        iounmap(csr_reg);
        }
}
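
/*
 * Illustrative PMD clock node (the address and the pmdpll label are made-up
 * examples); only one reg window holding the scaler register, the parent
 * clock and an optional "clock-output-names" are consumed above:
 *
 *      pmdclk: pmdclk@7e200200 {
 *              compatible = "apm,xgene-pmd-clock";
 *              #clock-cells = <1>;
 *              clocks = <&pmdpll 0>;
 *              reg = <0x0 0x7e200200 0x0 0x10>;
 *              clock-output-names = "pmdclk";
 *      };
 */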

/* IP Clock */
struct xgene_dev_parameters {
        void __iomem *csr_reg;          /* CSR for IP clock */
        u32 reg_clk_offset;             /* Offset to clock enable CSR */
        u32 reg_clk_mask;               /* Mask bit for clock enable */
        u32 reg_csr_offset;             /* Offset to CSR reset */
        u32 reg_csr_mask;               /* Mask bit for disable CSR reset */
        void __iomem *divider_reg;      /* CSR for divider */
        u32 reg_divider_offset;         /* Offset to divider register */
        u32 reg_divider_shift;          /* Bit shift to divider field */
        u32 reg_divider_width;          /* Width of the bit to divider field */
};

struct xgene_clk {
        struct clk_hw   hw;
        spinlock_t      *lock;
        struct xgene_dev_parameters     param;
};

#define to_xgene_clk(_hw) container_of(_hw, struct xgene_clk, hw)

static int xgene_clk_enable(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.csr_reg != NULL) {
                pr_debug("%s clock enabled\n", clk_hw_get_name(hw));
                /* First enable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                data |= pclk->param.reg_clk_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                pr_debug("%s clk offset 0x%08X mask 0x%08X value 0x%08X\n",
                        clk_hw_get_name(hw),
                        pclk->param.reg_clk_offset, pclk->param.reg_clk_mask,
                        data);

                /* Second enable the CSR */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                data &= ~pclk->param.reg_csr_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                pr_debug("%s csr offset 0x%08X mask 0x%08X value 0x%08X\n",
                        clk_hw_get_name(hw),
                        pclk->param.reg_csr_offset, pclk->param.reg_csr_mask,
                        data);
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);

        return 0;
}

static void xgene_clk_disable(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.csr_reg != NULL) {
                pr_debug("%s clock disabled\n", clk_hw_get_name(hw));
                /* First put the CSR in reset */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);
                data |= pclk->param.reg_csr_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_csr_offset);

                /* Second disable the clock */
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                data &= ~pclk->param.reg_clk_mask;
                xgene_clk_write(data, pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);
}

static int xgene_clk_is_enabled(struct clk_hw *hw)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        u32 data = 0;

        if (pclk->param.csr_reg != NULL) {
                pr_debug("%s clock checking\n", clk_hw_get_name(hw));
                data = xgene_clk_read(pclk->param.csr_reg +
                                        pclk->param.reg_clk_offset);
                pr_debug("%s clock is %s\n", clk_hw_get_name(hw),
                        data & pclk->param.reg_clk_mask ? "enabled" :
                                                        "disabled");
        }

        if (pclk->param.csr_reg == NULL)
                return 1;
        return data & pclk->param.reg_clk_mask ? 1 : 0;
}

static unsigned long xgene_clk_recalc_rate(struct clk_hw *hw,
                                unsigned long parent_rate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        u32 data;

        if (pclk->param.divider_reg) {
                data = xgene_clk_read(pclk->param.divider_reg +
                                        pclk->param.reg_divider_offset);
                data >>= pclk->param.reg_divider_shift;
                data &= (1 << pclk->param.reg_divider_width) - 1;

                pr_debug("%s clock recalc rate %ld parent %ld\n",
                        clk_hw_get_name(hw),
                        parent_rate / data, parent_rate);

                return parent_rate / data;
        } else {
                pr_debug("%s clock recalc rate %ld parent %ld\n",
                        clk_hw_get_name(hw), parent_rate, parent_rate);
                return parent_rate;
        }
}
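
/*
 * Worked example (hypothetical values): with a 500 MHz parent and a divider
 * field that reads back as 2, the rate reported above is 500 MHz / 2 =
 * 250 MHz; when no divider register was provided, the parent rate is passed
 * through unchanged.
 */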

static int xgene_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long parent_rate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long flags = 0;
        u32 data;
        u32 divider;
        u32 divider_save;

        if (pclk->lock)
                spin_lock_irqsave(pclk->lock, flags);

        if (pclk->param.divider_reg) {
                /* Let's compute the divider */
                if (rate > parent_rate)
                        rate = parent_rate;
                divider_save = divider = parent_rate / rate; /* Rounded down */
                divider &= (1 << pclk->param.reg_divider_width) - 1;
                divider <<= pclk->param.reg_divider_shift;

                /* Set new divider */
                data = xgene_clk_read(pclk->param.divider_reg +
                                pclk->param.reg_divider_offset);
                data &= ~(((1 << pclk->param.reg_divider_width) - 1)
                                << pclk->param.reg_divider_shift);
                data |= divider;
                xgene_clk_write(data, pclk->param.divider_reg +
                                        pclk->param.reg_divider_offset);
                pr_debug("%s clock set rate %ld\n", clk_hw_get_name(hw),
                        parent_rate / divider_save);
        } else {
                divider_save = 1;
        }

        if (pclk->lock)
                spin_unlock_irqrestore(pclk->lock, flags);

        return parent_rate / divider_save;
}

static long xgene_clk_round_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long *prate)
{
        struct xgene_clk *pclk = to_xgene_clk(hw);
        unsigned long parent_rate = *prate;
        u32 divider;

        if (pclk->param.divider_reg) {
                /* Let's compute the divider */
                if (rate > parent_rate)
                        rate = parent_rate;
                divider = parent_rate / rate;   /* Rounded down */
        } else {
                divider = 1;
        }

        return parent_rate / divider;
}
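
/*
 * Because the divider is parent_rate / rate rounded down, round_rate() and
 * set_rate() settle on the closest achievable rate at or above the request,
 * capped at the parent rate.  Worked example (hypothetical values): asking
 * for 200 MHz from a 500 MHz parent gives divider = 2 and an actual rate of
 * 250 MHz.
 */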

static const struct clk_ops xgene_clk_ops = {
        .enable = xgene_clk_enable,
        .disable = xgene_clk_disable,
        .is_enabled = xgene_clk_is_enabled,
        .recalc_rate = xgene_clk_recalc_rate,
        .set_rate = xgene_clk_set_rate,
        .round_rate = xgene_clk_round_rate,
};

static struct clk *xgene_register_clk(struct device *dev,
                const char *name, const char *parent_name,
                struct xgene_dev_parameters *parameters, spinlock_t *lock)
{
        struct xgene_clk *apmclk;
        struct clk *clk;
        struct clk_init_data init;
        int rc;

        /* allocate the APM clock structure */
        apmclk = kzalloc(sizeof(*apmclk), GFP_KERNEL);
        if (!apmclk) {
                pr_err("%s: could not allocate APM clk\n", __func__);
                return ERR_PTR(-ENOMEM);
        }

        init.name = name;
        init.ops = &xgene_clk_ops;
        init.flags = 0;
        init.parent_names = parent_name ? &parent_name : NULL;
        init.num_parents = parent_name ? 1 : 0;

        apmclk->lock = lock;
        apmclk->hw.init = &init;
        apmclk->param = *parameters;

        /* Register the clock */
        clk = clk_register(dev, &apmclk->hw);
        if (IS_ERR(clk)) {
                pr_err("%s: could not register clk %s\n", __func__, name);
                kfree(apmclk);
                return clk;
        }

        /* Register the clock for lookup */
        rc = clk_register_clkdev(clk, name, NULL);
        if (rc != 0) {
                pr_err("%s: could not register lookup clk %s\n",
                        __func__, name);
        }
        return clk;
}

static void __init xgene_devclk_init(struct device_node *np)
{
        const char *clk_name = np->full_name;
        struct clk *clk;
        struct resource res;
        int rc;
        struct xgene_dev_parameters parameters;
        int i;

        /* Check if the entry is disabled */
        if (!of_device_is_available(np))
                return;

        /* Parse the DTS register for resource */
        parameters.csr_reg = NULL;
        parameters.divider_reg = NULL;
        for (i = 0; i < 2; i++) {
                void __iomem *map_res;
                rc = of_address_to_resource(np, i, &res);
                if (rc != 0) {
                        if (i == 0) {
                                pr_err("no DTS register for %s\n",
                                        np->full_name);
                                return;
                        }
                        break;
                }
                map_res = of_iomap(np, i);
                if (map_res == NULL) {
                        pr_err("Unable to map resource %d for %s\n",
                                i, np->full_name);
                        goto err;
                }
                if (strcmp(res.name, "div-reg") == 0)
                        parameters.divider_reg = map_res;
                else /* if (strcmp(res->name, "csr-reg") == 0) */
                        parameters.csr_reg = map_res;
        }
        if (of_property_read_u32(np, "csr-offset", &parameters.reg_csr_offset))
                parameters.reg_csr_offset = 0;
        if (of_property_read_u32(np, "csr-mask", &parameters.reg_csr_mask))
                parameters.reg_csr_mask = 0xF;
        if (of_property_read_u32(np, "enable-offset",
                                &parameters.reg_clk_offset))
                parameters.reg_clk_offset = 0x8;
        if (of_property_read_u32(np, "enable-mask", &parameters.reg_clk_mask))
                parameters.reg_clk_mask = 0xF;
        if (of_property_read_u32(np, "divider-offset",
                                &parameters.reg_divider_offset))
                parameters.reg_divider_offset = 0;
        if (of_property_read_u32(np, "divider-width",
                                &parameters.reg_divider_width))
                parameters.reg_divider_width = 0;
        if (of_property_read_u32(np, "divider-shift",
                                &parameters.reg_divider_shift))
                parameters.reg_divider_shift = 0;
        of_property_read_string(np, "clock-output-names", &clk_name);

        clk = xgene_register_clk(NULL, clk_name,
                of_clk_get_parent_name(np, 0), &parameters, &clk_lock);
        if (IS_ERR(clk))
                goto err;
        pr_debug("Add %s clock\n", clk_name);
        rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
        if (rc != 0)
                pr_err("%s: could not register provider clk %s\n", __func__,
                        np->full_name);

        return;

err:
        if (parameters.csr_reg)
                iounmap(parameters.csr_reg);
        if (parameters.divider_reg)
                iounmap(parameters.divider_reg);
}
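
/*
 * Illustrative device clock node (the address, offsets, masks and labels
 * are made-up examples).  A second reg window named "div-reg" would be
 * picked up as the divider register; the offset/mask/width properties fall
 * back to the defaults chosen above when absent:
 *
 *      sdioclk: sdioclk@1f2ac000 {
 *              compatible = "apm,xgene-device-clock";
 *              #clock-cells = <1>;
 *              clocks = <&socplldiv2 0>;
 *              reg = <0x0 0x1f2ac000 0x0 0x1000>;
 *              reg-names = "csr-reg";
 *              csr-offset = <0x0>;
 *              csr-mask = <0x2>;
 *              enable-offset = <0x8>;
 *              enable-mask = <0x2>;
 *              clock-output-names = "sdioclk";
 *      };
 */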

CLK_OF_DECLARE(xgene_socpll_clock, "apm,xgene-socpll-clock", xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_clock, "apm,xgene-pcppll-clock", xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_pmd_clock, "apm,xgene-pmd-clock", xgene_pmdclk_init);
CLK_OF_DECLARE(xgene_socpll_v2_clock, "apm,xgene-socpll-v2-clock",
               xgene_socpllclk_init);
CLK_OF_DECLARE(xgene_pcppll_v2_clock, "apm,xgene-pcppll-v2-clock",
               xgene_pcppllclk_init);
CLK_OF_DECLARE(xgene_dev_clock, "apm,xgene-device-clock", xgene_devclk_init);