linux/drivers/clk/sunxi-ng/ccu_mp.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Maxime Ripard
 * Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/clk-provider.h>
#include <linux/io.h>

#include "ccu_gate.h"
#include "ccu_mp.h"

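/*
 * Brute-force search for the m/p pair that yields the highest rate not
 * above the requested one, with a fixed parent rate: p is a power-of-two
 * divider, m a linear one. For example, a 24 MHz parent and a 5 MHz
 * request give m = 5, p = 1, i.e. 24 MHz / 1 / 5 = 4.8 MHz.
 */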
static void ccu_mp_find_best(unsigned long parent, unsigned long rate,
                             unsigned int max_m, unsigned int max_p,
                             unsigned int *m, unsigned int *p)
{
        unsigned long best_rate = 0;
        unsigned int best_m = 0, best_p = 0;
        unsigned int _m, _p;

        for (_p = 1; _p <= max_p; _p <<= 1) {
                for (_m = 1; _m <= max_m; _m++) {
                        unsigned long tmp_rate = parent / _p / _m;

                        if (tmp_rate > rate)
                                continue;

                        if ((rate - tmp_rate) < (rate - best_rate)) {
                                best_rate = tmp_rate;
                                best_m = _m;
                                best_p = _p;
                        }
                }
        }

        *m = best_m;
        *p = best_p;
}

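/*
 * Variant of the search used when the parent rate may be re-adjusted:
 * for each m/p pair, clk_hw_round_rate() is asked what parent rate is
 * achievable for rate * m * p, and the pair coming closest to the
 * request from below wins. The chosen parent rate is returned through
 * *parent, the resulting output rate as the return value.
 */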
static unsigned long ccu_mp_find_best_with_parent_adj(struct clk_hw *hw,
                                                      unsigned long *parent,
                                                      unsigned long rate,
                                                      unsigned int max_m,
                                                      unsigned int max_p)
{
        unsigned long parent_rate_saved;
        unsigned long parent_rate, now;
        unsigned long best_rate = 0;
        unsigned int _m, _p, div;
        unsigned long maxdiv;

        parent_rate_saved = *parent;

        /*
         * The maximum divider we can use without overflowing
         * unsigned long in rate * m * p below
         */
        maxdiv = max_m * max_p;
        maxdiv = min(ULONG_MAX / rate, maxdiv);

        for (_p = 1; _p <= max_p; _p <<= 1) {
                for (_m = 1; _m <= max_m; _m++) {
                        div = _m * _p;

                        if (div > maxdiv)
                                break;

                        if (rate * div == parent_rate_saved) {
                                /*
                                 * It's the most ideal case if the requested
                                 * rate can be divided from parent clock without
                                 * needing to change parent rate, so return the
                                 * divider immediately.
                                 */
                                *parent = parent_rate_saved;
                                return rate;
                        }

                        parent_rate = clk_hw_round_rate(hw, rate * div);
                        now = parent_rate / div;

                        if (now <= rate && now > best_rate) {
                                best_rate = now;
                                *parent = parent_rate;

                                if (now == rate)
                                        return rate;
                        }
                }
        }

        return best_rate;
}

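/*
 * Rate-rounding callback handed to the mux helper: scale the request by
 * the fixed post-divider if present, pick m and p (re-adjusting the
 * parent where allowed), then scale the result back down.
 */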
static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
                                       struct clk_hw *hw,
                                       unsigned long *parent_rate,
                                       unsigned long rate,
                                       void *data)
{
        struct ccu_mp *cmp = data;
        unsigned int max_m, max_p;
        unsigned int m, p;

        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate *= cmp->fixed_post_div;

        max_m = cmp->m.max ?: 1 << cmp->m.width;
        max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

        if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
                ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
                rate = *parent_rate / p / m;
        } else {
                rate = ccu_mp_find_best_with_parent_adj(hw, parent_rate, rate,
                                                        max_m, max_p);
        }

        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= cmp->fixed_post_div;

        return rate;
}

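/* Gating is delegated to the shared CCU gate helpers. */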
static void ccu_mp_disable(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_gate_helper_disable(&cmp->common, cmp->enable);
}

static int ccu_mp_enable(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_gate_helper_enable(&cmp->common, cmp->enable);
}

static int ccu_mp_is_enabled(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_gate_helper_is_enabled(&cmp->common, cmp->enable);
}

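/*
 * Compute the current rate from the register: m is a linear divider
 * (field value plus offset, at least 1), p the log2 of a power-of-two
 * divider. Parent pre-dividers and the fixed post-divider are applied
 * on top.
 */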
static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);
        unsigned long rate;
        unsigned int m, p;
        u32 reg;

        /* Adjust parent_rate according to pre-dividers */
        parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
                                                  parent_rate);

        reg = readl(cmp->common.base + cmp->common.reg);

        m = reg >> cmp->m.shift;
        m &= (1 << cmp->m.width) - 1;
        m += cmp->m.offset;
        if (!m)
                m++;

        p = reg >> cmp->p.shift;
        p &= (1 << cmp->p.width) - 1;

        rate = (parent_rate >> p) / m;
        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate /= cmp->fixed_post_div;

        return rate;
}

static int ccu_mp_determine_rate(struct clk_hw *hw,
                                 struct clk_rate_request *req)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_determine_rate(&cmp->common, &cmp->mux,
                                             req, ccu_mp_round_rate, cmp);
}

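/*
 * Program a new rate: find the best m/p pair for the pre-divided parent
 * rate, then write m minus its offset and log2(p) into their register
 * fields under the CCU spinlock.
 */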
static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate,
                           unsigned long parent_rate)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);
        unsigned long flags;
        unsigned int max_m, max_p;
        unsigned int m, p;
        u32 reg;

        /* Adjust parent_rate according to pre-dividers */
        parent_rate = ccu_mux_helper_apply_prediv(&cmp->common, &cmp->mux, -1,
                                                  parent_rate);

        max_m = cmp->m.max ?: 1 << cmp->m.width;
        max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);

        /* Adjust target rate according to post-dividers */
        if (cmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
                rate = rate * cmp->fixed_post_div;

        ccu_mp_find_best(parent_rate, rate, max_m, max_p, &m, &p);

        spin_lock_irqsave(cmp->common.lock, flags);

        reg = readl(cmp->common.base + cmp->common.reg);
        reg &= ~GENMASK(cmp->m.width + cmp->m.shift - 1, cmp->m.shift);
        reg &= ~GENMASK(cmp->p.width + cmp->p.shift - 1, cmp->p.shift);
        reg |= (m - cmp->m.offset) << cmp->m.shift;
        reg |= ilog2(p) << cmp->p.shift;

        writel(reg, cmp->common.base + cmp->common.reg);

        spin_unlock_irqrestore(cmp->common.lock, flags);

        return 0;
}

static u8 ccu_mp_get_parent(struct clk_hw *hw)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_get_parent(&cmp->common, &cmp->mux);
}

static int ccu_mp_set_parent(struct clk_hw *hw, u8 index)
{
        struct ccu_mp *cmp = hw_to_ccu_mp(hw);

        return ccu_mux_helper_set_parent(&cmp->common, &cmp->mux, index);
}

const struct clk_ops ccu_mp_ops = {
        .disable        = ccu_mp_disable,
        .enable         = ccu_mp_enable,
        .is_enabled     = ccu_mp_is_enabled,

        .get_parent     = ccu_mp_get_parent,
        .set_parent     = ccu_mp_set_parent,

        .determine_rate = ccu_mp_determine_rate,
        .recalc_rate    = ccu_mp_recalc_rate,
        .set_rate       = ccu_mp_set_rate,
};

/*
 * Support for MMC timing mode switching
 *
 * The MMC clocks on some SoCs support switching between old and
 * new timing modes. A platform specific API is provided to query
 * and set the timing mode on supported SoCs.
 *
 * In addition, a special class of ccu_mp_ops is provided, which
 * takes into account the timing mode switch. When the new timing
 * mode is active, the clock output rate is halved. This new class
 * is a wrapper around the generic ccu_mp_ops. When clock rates
 * are passed through to ccu_mp_ops callbacks, they are doubled
 * if the new timing mode bit is set, to account for the post
 * divider. Conversely, when clock rates are passed back, they
 * are halved if the mode bit is set.
 */

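/*
 * For example, with the new timing mode bit set, a 50 MHz request is
 * presented to ccu_mp_determine_rate() as 100 MHz, and the rate read
 * back by ccu_mp_recalc_rate() is halved before being reported.
 */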
static unsigned long ccu_mp_mmc_recalc_rate(struct clk_hw *hw,
                                            unsigned long parent_rate)
{
        unsigned long rate = ccu_mp_recalc_rate(hw, parent_rate);
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);

        if (val & CCU_MMC_NEW_TIMING_MODE)
                return rate / 2;
        return rate;
}

static int ccu_mp_mmc_determine_rate(struct clk_hw *hw,
                                     struct clk_rate_request *req)
{
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);
        int ret;

        /* adjust the requested clock rate */
        if (val & CCU_MMC_NEW_TIMING_MODE) {
                req->rate *= 2;
                req->min_rate *= 2;
                req->max_rate *= 2;
        }

        ret = ccu_mp_determine_rate(hw, req);

        /* re-adjust the requested clock rate back */
        if (val & CCU_MMC_NEW_TIMING_MODE) {
                req->rate /= 2;
                req->min_rate /= 2;
                req->max_rate /= 2;
        }

        return ret;
}

static int ccu_mp_mmc_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
{
        struct ccu_common *cm = hw_to_ccu_common(hw);
        u32 val = readl(cm->base + cm->reg);

        if (val & CCU_MMC_NEW_TIMING_MODE)
                rate *= 2;

        return ccu_mp_set_rate(hw, rate, parent_rate);
}

const struct clk_ops ccu_mp_mmc_ops = {
        .disable        = ccu_mp_disable,
        .enable         = ccu_mp_enable,
        .is_enabled     = ccu_mp_is_enabled,

        .get_parent     = ccu_mp_get_parent,
        .set_parent     = ccu_mp_set_parent,

        .determine_rate = ccu_mp_mmc_determine_rate,
        .recalc_rate    = ccu_mp_mmc_recalc_rate,
        .set_rate       = ccu_mp_mmc_set_rate,
};