1
2
3
4
5
6
7
8
9
10
11#include <linux/clk-provider.h>
12
13#include "ccu_frac.h"
14#include "ccu_gate.h"
15#include "ccu_nm.h"
16
/*
 * Scratch structure for the N/M factor search: the allowed ranges go
 * in, the chosen factors come back out in @n and @m.
 */
struct _ccu_nm {
	unsigned long n, min_n, max_n;
	unsigned long m, min_m, max_m;
};

/*
 * Exhaustively walk the (n, m) space and pick the pair whose rate
 * (parent * n / m) is the highest one not exceeding @rate.
 */
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
			     struct _ccu_nm *nm)
{
	unsigned long chosen_rate = 0;
	unsigned long chosen_n = 0, chosen_m = 0;
	unsigned long cur_n, cur_m;

	for (cur_n = nm->min_n; cur_n <= nm->max_n; cur_n++) {
		for (cur_m = nm->min_m; cur_m <= nm->max_m; cur_m++) {
			unsigned long cand = parent * cur_n / cur_m;

			/* Never overshoot the requested rate */
			if (cand > rate)
				continue;

			/*
			 * cand and chosen_rate are both <= rate, so
			 * the candidate closest to rate from below is
			 * simply the largest one seen so far.
			 */
			if (cand > chosen_rate) {
				chosen_rate = cand;
				chosen_n = cur_n;
				chosen_m = cur_m;
			}
		}
	}

	nm->n = chosen_n;
	nm->m = chosen_m;
}
47
48static void ccu_nm_disable(struct clk_hw *hw)
49{
50 struct ccu_nm *nm = hw_to_ccu_nm(hw);
51
52 return ccu_gate_helper_disable(&nm->common, nm->enable);
53}
54
55static int ccu_nm_enable(struct clk_hw *hw)
56{
57 struct ccu_nm *nm = hw_to_ccu_nm(hw);
58
59 return ccu_gate_helper_enable(&nm->common, nm->enable);
60}
61
62static int ccu_nm_is_enabled(struct clk_hw *hw)
63{
64 struct ccu_nm *nm = hw_to_ccu_nm(hw);
65
66 return ccu_gate_helper_is_enabled(&nm->common, nm->enable);
67}
68
69static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
70 unsigned long parent_rate)
71{
72 struct ccu_nm *nm = hw_to_ccu_nm(hw);
73 unsigned long n, m;
74 u32 reg;
75
76 if (ccu_frac_helper_is_enabled(&nm->common, &nm->frac))
77 return ccu_frac_helper_read_rate(&nm->common, &nm->frac);
78
79 reg = readl(nm->common.base + nm->common.reg);
80
81 n = reg >> nm->n.shift;
82 n &= (1 << nm->n.width) - 1;
83 n += nm->n.offset;
84 if (!n)
85 n++;
86
87 m = reg >> nm->m.shift;
88 m &= (1 << nm->m.width) - 1;
89 m += nm->m.offset;
90 if (!m)
91 m++;
92
93 return parent_rate * n / m;
94}
95
96static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
97 unsigned long *parent_rate)
98{
99 struct ccu_nm *nm = hw_to_ccu_nm(hw);
100 struct _ccu_nm _nm;
101
102 _nm.min_n = nm->n.min ?: 1;
103 _nm.max_n = nm->n.max ?: 1 << nm->n.width;
104 _nm.min_m = 1;
105 _nm.max_m = nm->m.max ?: 1 << nm->m.width;
106
107 ccu_nm_find_best(*parent_rate, rate, &_nm);
108
109 return *parent_rate * _nm.n / _nm.m;
110}
111
/*
 * Program the hardware to produce @rate from @parent_rate, preferring
 * the fractional mode when it can provide the rate, and falling back
 * to the integer N/M factors otherwise.
 */
static int ccu_nm_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nm *nm = hw_to_ccu_nm(hw);
	struct _ccu_nm _nm;
	unsigned long flags;
	u32 reg;

	if (ccu_frac_helper_has_rate(&nm->common, &nm->frac, rate)) {
		spin_lock_irqsave(nm->common.lock, flags);

		/* Clear the M factor field while fractional mode is used */
		reg = readl(nm->common.base + nm->common.reg);
		reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);
		writel(reg, nm->common.base + nm->common.reg);

		spin_unlock_irqrestore(nm->common.lock, flags);

		ccu_frac_helper_enable(&nm->common, &nm->frac);

		/* The frac helper programs the rate and waits for lock */
		return ccu_frac_helper_set_rate(&nm->common, &nm->frac,
						rate, nm->lock);
	} else {
		ccu_frac_helper_disable(&nm->common, &nm->frac);
	}

	/*
	 * Integer mode: search the allowed factor ranges for the best
	 * approximation from below (0 in a min/max field means "use
	 * the full range of the register field").
	 */
	_nm.min_n = nm->n.min ?: 1;
	_nm.max_n = nm->n.max ?: 1 << nm->n.width;
	_nm.min_m = 1;
	_nm.max_m = nm->m.max ?: 1 << nm->m.width;

	ccu_nm_find_best(parent_rate, rate, &_nm);

	spin_lock_irqsave(nm->common.lock, flags);

	/* Read-modify-write both factor fields under the lock */
	reg = readl(nm->common.base + nm->common.reg);
	reg &= ~GENMASK(nm->n.width + nm->n.shift - 1, nm->n.shift);
	reg &= ~GENMASK(nm->m.width + nm->m.shift - 1, nm->m.shift);

	/* The offset maps each factor value to its register encoding */
	reg |= (_nm.n - nm->n.offset) << nm->n.shift;
	reg |= (_nm.m - nm->m.offset) << nm->m.shift;
	writel(reg, nm->common.base + nm->common.reg);

	spin_unlock_irqrestore(nm->common.lock, flags);

	/* Wait until the hardware reports the new rate as stable */
	ccu_helper_wait_for_lock(&nm->common, nm->lock);

	return 0;
}
161
/*
 * clk_ops for N/M-factor clocks: gate control plus integer (and,
 * where supported, fractional) rate control.
 */
const struct clk_ops ccu_nm_ops = {
	.disable	= ccu_nm_disable,
	.enable		= ccu_nm_enable,
	.is_enabled	= ccu_nm_is_enabled,

	.recalc_rate	= ccu_nm_recalc_rate,
	.round_rate	= ccu_nm_round_rate,
	.set_rate	= ccu_nm_set_rate,
};
171