/* linux/drivers/clk/mvebu/clk-cpu.c */
   1/*
   2 * Marvell MVEBU CPU clock handling.
   3 *
   4 * Copyright (C) 2012 Marvell
   5 *
   6 * Gregory CLEMENT <gregory.clement@free-electrons.com>
   7 *
   8 * This file is licensed under the terms of the GNU General Public
   9 * License version 2.  This program is licensed "as is" without any
  10 * warranty of any kind, whether express or implied.
  11 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/mvebu-pmsu.h>
#include <asm/smp_plat.h>
  22
/*
 * System-control "clock divider" register block layout.  The VALUE
 * register carries one 8-bit divider field per CPU (only the low 6 bits
 * are used, see SYS_CTRL_CLK_DIVIDER_MASK); the CTRL register holds the
 * per-CPU "smooth reload" bits and the global reload trigger used in
 * clk_cpu_off_set_rate(); CTRL2 exposes the fabric (NBCLK) ratio read
 * in clk_cpu_on_set_rate().
 */
#define SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET               0x0
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL          0xff
#define   SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT        8
#define SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET              0x8
#define   SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT 16
#define SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET              0xC
#define SYS_CTRL_CLK_DIVIDER_MASK                      0x3F

/* Position/width of the ratio field in each per-CPU PMU DFS register */
#define PMU_DFS_RATIO_SHIFT 16
#define PMU_DFS_RATIO_MASK  0x3F

#define MAX_CPU     4

/* Per-CPU clock instance; one is allocated per DT "cpu" node. */
struct cpu_clk {
	struct clk_hw hw;		/* clk framework handle (embedded) */
	int cpu;			/* CPU index from the DT "reg" property */
	const char *clk_name;		/* heap-allocated "cpuN" name, owned here */
	const char *parent_name;	/* parent clock name from the DT */
	void __iomem *reg_base;		/* shared clock-complex register base */
	void __iomem *pmu_dfs;		/* this CPU's PMU DFS register, or NULL */
};

/* Array of registered clocks, indexed by CPU; filled by of_cpu_clk_setup() */
static struct clk **clks;

/* Backing data for the onecell clock provider */
static struct clk_onecell_data clk_data;

#define to_cpu_clk(p) container_of(p, struct cpu_clk, hw)
  49
  50static unsigned long clk_cpu_recalc_rate(struct clk_hw *hwclk,
  51                                         unsigned long parent_rate)
  52{
  53        struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
  54        u32 reg, div;
  55
  56        reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
  57        div = (reg >> (cpuclk->cpu * 8)) & SYS_CTRL_CLK_DIVIDER_MASK;
  58        return parent_rate / div;
  59}
  60
  61static long clk_cpu_round_rate(struct clk_hw *hwclk, unsigned long rate,
  62                               unsigned long *parent_rate)
  63{
  64        /* Valid ratio are 1:1, 1:2 and 1:3 */
  65        u32 div;
  66
  67        div = *parent_rate / rate;
  68        if (div == 0)
  69                div = 1;
  70        else if (div > 3)
  71                div = 3;
  72
  73        return *parent_rate / div;
  74}
  75
/*
 * Program a new divider for a CPU whose clock can be updated directly
 * (used when the clk is not enabled, see clk_cpu_set_rate()).
 *
 * The sequence is: write the divider field, arm the per-CPU "smooth
 * reload" bit, pulse the global reload trigger (bit 24), wait for the
 * clocks to settle, then clear both bits.  The exact ordering and the
 * udelay() settle times are hardware requirements — do not reorder.
 * NOTE(review): bit 24 as the reload trigger and bits 20+cpu as the
 * smooth-reload mask are taken as-is from this code; confirm against
 * the Armada XP clock-complex documentation before changing them.
 */
static int clk_cpu_off_set_rate(struct clk_hw *hwclk, unsigned long rate,
				unsigned long parent_rate)

{
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);
	u32 reg, div;
	u32 reload_mask;

	/* Replace only this CPU's 8-bit divider field in the VALUE register */
	div = parent_rate / rate;
	reg = (readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET)
		& (~(SYS_CTRL_CLK_DIVIDER_MASK << (cpuclk->cpu * 8))))
		| (div << (cpuclk->cpu * 8));
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_VALUE_OFFSET);
	/* Set clock divider reload smooth bit mask */
	reload_mask = 1 << (20 + cpuclk->cpu);

	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | reload_mask;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Now trigger the clock update */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET)
	    | 1 << 24;
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	/* Wait for clocks to settle down then clear reload request */
	udelay(1000);
	reg &= ~(reload_mask | 1 << 24);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	udelay(1000);

	return 0;
}
 109
/*
 * Change the rate of a running CPU via the PMU dynamic frequency
 * scaling (DFS) machinery: program the target ratio into this CPU's
 * PMU DFS register, arm the divider reset bits, then hand off to
 * mvebu_pmsu_dfs_request() which performs the actual switch.
 *
 * Returns -ENODEV when the PMU DFS registers were not described in the
 * Device Tree (pmu_dfs == NULL), otherwise the result of
 * mvebu_pmsu_dfs_request().
 */
static int clk_cpu_on_set_rate(struct clk_hw *hwclk, unsigned long rate,
			       unsigned long parent_rate)
{
	u32 reg;
	unsigned long fabric_div, target_div, cur_rate;
	struct cpu_clk *cpuclk = to_cpu_clk(hwclk);

	/*
	 * PMU DFS registers are not mapped, Device Tree does not
	 * describes them. We cannot change the frequency dynamically.
	 */
	if (!cpuclk->pmu_dfs)
		return -ENODEV;

	cur_rate = clk_hw_get_rate(hwclk);

	/* Current CPU-to-fabric (NBCLK) divider from the CTRL2 register */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL2_OFFSET);
	fabric_div = (reg >> SYS_CTRL_CLK_DIVIDER_CTRL2_NBCLK_RATIO_SHIFT) &
		SYS_CTRL_CLK_DIVIDER_MASK;

	/* Frequency is going up */
	if (rate == 2 * cur_rate)
		target_div = fabric_div / 2;
	/* Frequency is going down */
	else
		target_div = fabric_div;

	/* A divider of 0 is invalid; clamp to 1:1 */
	if (target_div == 0)
		target_div = 1;

	/*
	 * NOTE(review): only doubling (rate == 2*cur_rate) and dropping
	 * back to the fabric rate are handled — presumably these are the
	 * only two operating points cpufreq requests; confirm with the
	 * cpufreq driver using these clocks.
	 */
	reg = readl(cpuclk->pmu_dfs);
	reg &= ~(PMU_DFS_RATIO_MASK << PMU_DFS_RATIO_SHIFT);
	reg |= (target_div << PMU_DFS_RATIO_SHIFT);
	writel(reg, cpuclk->pmu_dfs);

	/*
	 * Arm the reset-all bits so the PMSU reloads the dividers during
	 * the DFS transition — TODO confirm against the Armada XP spec.
	 */
	reg = readl(cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);
	reg |= (SYS_CTRL_CLK_DIVIDER_CTRL_RESET_ALL <<
		SYS_CTRL_CLK_DIVIDER_CTRL_RESET_SHIFT);
	writel(reg, cpuclk->reg_base + SYS_CTRL_CLK_DIVIDER_CTRL_OFFSET);

	return mvebu_pmsu_dfs_request(cpuclk->cpu);
}
 152
 153static int clk_cpu_set_rate(struct clk_hw *hwclk, unsigned long rate,
 154                            unsigned long parent_rate)
 155{
 156        if (__clk_is_enabled(hwclk->clk))
 157                return clk_cpu_on_set_rate(hwclk, rate, parent_rate);
 158        else
 159                return clk_cpu_off_set_rate(hwclk, rate, parent_rate);
 160}
 161
/* clk_ops shared by every per-CPU clock registered below. */
static const struct clk_ops cpu_ops = {
	.recalc_rate = clk_cpu_recalc_rate,
	.round_rate = clk_cpu_round_rate,
	.set_rate = clk_cpu_set_rate,
};
 167
 168static void __init of_cpu_clk_setup(struct device_node *node)
 169{
 170        struct cpu_clk *cpuclk;
 171        void __iomem *clock_complex_base = of_iomap(node, 0);
 172        void __iomem *pmu_dfs_base = of_iomap(node, 1);
 173        int ncpus = 0;
 174        struct device_node *dn;
 175
 176        if (clock_complex_base == NULL) {
 177                pr_err("%s: clock-complex base register not set\n",
 178                        __func__);
 179                return;
 180        }
 181
 182        if (pmu_dfs_base == NULL)
 183                pr_warn("%s: pmu-dfs base register not set, dynamic frequency scaling not available\n",
 184                        __func__);
 185
 186        for_each_node_by_type(dn, "cpu")
 187                ncpus++;
 188
 189        cpuclk = kcalloc(ncpus, sizeof(*cpuclk), GFP_KERNEL);
 190        if (WARN_ON(!cpuclk))
 191                goto cpuclk_out;
 192
 193        clks = kcalloc(ncpus, sizeof(*clks), GFP_KERNEL);
 194        if (WARN_ON(!clks))
 195                goto clks_out;
 196
 197        for_each_node_by_type(dn, "cpu") {
 198                struct clk_init_data init;
 199                struct clk *clk;
 200                char *clk_name = kzalloc(5, GFP_KERNEL);
 201                int cpu, err;
 202
 203                if (WARN_ON(!clk_name))
 204                        goto bail_out;
 205
 206                err = of_property_read_u32(dn, "reg", &cpu);
 207                if (WARN_ON(err))
 208                        goto bail_out;
 209
 210                sprintf(clk_name, "cpu%d", cpu);
 211
 212                cpuclk[cpu].parent_name = of_clk_get_parent_name(node, 0);
 213                cpuclk[cpu].clk_name = clk_name;
 214                cpuclk[cpu].cpu = cpu;
 215                cpuclk[cpu].reg_base = clock_complex_base;
 216                if (pmu_dfs_base)
 217                        cpuclk[cpu].pmu_dfs = pmu_dfs_base + 4 * cpu;
 218                cpuclk[cpu].hw.init = &init;
 219
 220                init.name = cpuclk[cpu].clk_name;
 221                init.ops = &cpu_ops;
 222                init.flags = 0;
 223                init.parent_names = &cpuclk[cpu].parent_name;
 224                init.num_parents = 1;
 225
 226                clk = clk_register(NULL, &cpuclk[cpu].hw);
 227                if (WARN_ON(IS_ERR(clk)))
 228                        goto bail_out;
 229                clks[cpu] = clk;
 230        }
 231        clk_data.clk_num = MAX_CPU;
 232        clk_data.clks = clks;
 233        of_clk_add_provider(node, of_clk_src_onecell_get, &clk_data);
 234
 235        return;
 236bail_out:
 237        kfree(clks);
 238        while(ncpus--)
 239                kfree(cpuclk[ncpus].clk_name);
 240clks_out:
 241        kfree(cpuclk);
 242cpuclk_out:
 243        iounmap(clock_complex_base);
 244}
 245
 246CLK_OF_DECLARE(armada_xp_cpu_clock, "marvell,armada-xp-cpu-clock",
 247                                         of_cpu_clk_setup);
 248
/*
 * On mv98dx3236 no per-CPU rate control is set up: register a simple
 * pass-through provider so consumers can still look up the clock by
 * phandle.
 */
static void __init of_mv98dx3236_cpu_clk_setup(struct device_node *node)
{
	of_clk_add_provider(node, of_clk_src_simple_get, NULL);
}

CLK_OF_DECLARE(mv98dx3236_cpu_clock, "marvell,mv98dx3236-cpu-clock",
					 of_mv98dx3236_cpu_clk_setup);
 256