#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>

#include "rcar-cpg-lib.h"

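/* Lock protecting read-modify-write cycles on shared CPG registers. */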
spinlock_t cpg_lock;

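/*
 * Update a CPG register under cpg_lock: clear the bits in @clear, then set
 * the bits in @set.
 */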
void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&cpg_lock, flags);
	val = readl(reg);
	val &= ~clear;
	val |= set;
	writel(val, reg);
	spin_unlock_irqrestore(&cpg_lock, flags);
}

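/*
 * Simple notifier: save the monitored register on system suspend and
 * restore it on resume.
 */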
static int cpg_simple_notifier_call(struct notifier_block *nb,
				    unsigned long action, void *data)
{
	struct cpg_simple_notifier *csn =
		container_of(nb, struct cpg_simple_notifier, nb);

	switch (action) {
	case PM_EVENT_SUSPEND:
		csn->saved = readl(csn->reg);
		return NOTIFY_OK;

	case PM_EVENT_RESUME:
		writel(csn->saved, csn->reg);
		return NOTIFY_OK;
	}
	return NOTIFY_DONE;
}

void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
				  struct cpg_simple_notifier *csn)
{
	csn->nb.notifier_call = cpg_simple_notifier_call;
	raw_notifier_chain_register(notifiers, &csn->nb);
}

/*
 * SDn Clock
 */
#define CPG_SD_STP_HCK		BIT(9)
#define CPG_SD_STP_CK		BIT(8)

#define CPG_SD_STP_MASK		(CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK		(0x7 << 2 | 0x3 << 0)

#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
{ \
	.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
	       ((sd_srcfc) << 2) | \
	       ((sd_fc) << 0), \
	.div = (sd_div), \
}

struct sd_div_table {
	u32 val;
	unsigned int div;
};

struct sd_clock {
	struct clk_hw hw;
	const struct sd_div_table *div_table;
	struct cpg_simple_notifier csn;
	unsigned int div_num;
	unsigned int cur_div_idx;
};

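/*
 * SDn divider table: each entry encodes the STPnHCK stop bit and the
 * SRCFC/FC frequency-control fields of the SDn clock frequency control
 * register, together with the resulting overall division ratio.
 */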
static const struct sd_div_table cpg_sd_div_table[] = {
	/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
	CPG_SD_DIV_TABLE_DATA(0, 0, 1,  4),
	CPG_SD_DIV_TABLE_DATA(0, 1, 1,  8),
	CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
	CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
	CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
	CPG_SD_DIV_TABLE_DATA(0, 0, 0,  2),
	CPG_SD_DIV_TABLE_DATA(0, 1, 0,  4),
	CPG_SD_DIV_TABLE_DATA(1, 2, 0,  8),
	CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
	CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
};

#define to_sd_clock(_hw)	container_of(_hw, struct sd_clock, hw)

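/*
 * The SDn clock is gated via the STPnHCK/STPnCK stop bits: enabling programs
 * the stop bits required by the currently selected divider entry, disabling
 * sets both stop bits.
 */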
static int cpg_sd_clock_enable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
		       clock->div_table[clock->cur_div_idx].val &
		       CPG_SD_STP_MASK);

	return 0;
}

static void cpg_sd_clock_disable(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
}

static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
}

static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
					      unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);

	return DIV_ROUND_CLOSEST(parent_rate,
				 clock->div_table[clock->cur_div_idx].div);
}

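/*
 * Select the divider whose resulting rate is closest to the requested rate
 * while staying within the request's min/max constraints.
 */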
static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned long calc_rate, diff;
	unsigned int i;

	for (i = 0; i < clock->div_num; i++) {
		calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
					      clock->div_table[i].div);
		if (calc_rate < req->min_rate || calc_rate > req->max_rate)
			continue;

		diff = calc_rate > req->rate ? calc_rate - req->rate
					     : req->rate - calc_rate;
		if (diff < diff_min) {
			best_rate = calc_rate;
			diff_min = diff;
		}
	}

	if (best_rate == ULONG_MAX)
		return -EINVAL;

	req->rate = best_rate;
	return 0;
}

static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long parent_rate)
{
	struct sd_clock *clock = to_sd_clock(hw);
	unsigned int i;

	for (i = 0; i < clock->div_num; i++)
		if (rate == DIV_ROUND_CLOSEST(parent_rate,
					      clock->div_table[i].div))
			break;

	if (i >= clock->div_num)
		return -EINVAL;

	clock->cur_div_idx = i;

	cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
		       clock->div_table[i].val &
		       (CPG_SD_STP_MASK | CPG_SD_FC_MASK));

	return 0;
}

static const struct clk_ops cpg_sd_clock_ops = {
	.enable = cpg_sd_clock_enable,
	.disable = cpg_sd_clock_disable,
	.is_enabled = cpg_sd_clock_is_enabled,
	.recalc_rate = cpg_sd_clock_recalc_rate,
	.determine_rate = cpg_sd_clock_determine_rate,
	.set_rate = cpg_sd_clock_set_rate,
};

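/*
 * Register an SDn clock. @skip_first drops the first entry of the common
 * divider table; the control register at @base + @offset is saved and
 * restored across system suspend via the supplied notifier chain.
 */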
struct clk * __init cpg_sd_clk_register(const char *name,
	void __iomem *base, unsigned int offset, const char *parent_name,
	struct raw_notifier_head *notifiers, bool skip_first)
{
	struct clk_init_data init = {};
	struct sd_clock *clock;
	struct clk *clk;
	u32 val;

	clock = kzalloc(sizeof(*clock), GFP_KERNEL);
	if (!clock)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &cpg_sd_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->csn.reg = base + offset;
	clock->hw.init = &init;
	clock->div_table = cpg_sd_div_table;
	clock->div_num = ARRAY_SIZE(cpg_sd_div_table);

	if (skip_first) {
		clock->div_table++;
		clock->div_num--;
	}

	val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
	val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
	writel(val, clock->csn.reg);

	clk = clk_register(NULL, &clock->hw);
	if (IS_ERR(clk))
		goto free_clock;

	cpg_simple_notifier_register(notifiers, &clock->csn);
	return clk;

free_clock:
	kfree(clock);
	return clk;
}