// SPDX-License-Identifier: GPL-2.0
/*
 *  skl-ssp-clk.c - ASoC Skylake SSP clock driver
 *
 *  Copyright (C) 2017 Intel Corp
 */
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/err.h>
11#include <linux/platform_device.h>
12#include <linux/clk-provider.h>
13#include <linux/clkdev.h>
14#include <sound/intel-nhlt.h>
15#include "skl.h"
16#include "skl-ssp-clk.h"
17#include "skl-topology.h"
18
/* Convert a clk framework clk_hw pointer back to its containing skl_clk */
#define to_skl_clk(_hw) container_of(_hw, struct skl_clk, hw)
20
/* A registered fixed-rate parent clock together with its clkdev lookup */
struct skl_clk_parent {
	struct clk_hw *hw;		/* handle from clk_hw_register_fixed_rate() */
	struct clk_lookup *lookup;	/* entry from clkdev_hw_create() */
};
25
/* One exposed SSP clock (MCLK, SCLK or SCLK/FS) */
struct skl_clk {
	struct clk_hw hw;		/* common clk framework handle */
	struct clk_lookup *lookup;	/* clkdev entry for lookup by name */
	unsigned long rate;		/* last rate set; 0 = not yet configured */
	struct skl_clk_pdata *pdata;	/* platform data: rate tables, skl context */
	u32 id;				/* flat index into pdata->ssp_clks */
};
33
/* Per-device driver state: parent sources plus the registered SSP clocks */
struct skl_clk_data {
	struct skl_clk_parent parent[SKL_MAX_CLK_SRC];
	struct skl_clk *clk[SKL_MAX_CLK_CNT];
	u8 avail_clk_cnt;	/* number of valid entries in clk[] */
};
39
40static int skl_get_clk_type(u32 index)
41{
42 switch (index) {
43 case 0 ... (SKL_SCLK_OFS - 1):
44 return SKL_MCLK;
45
46 case SKL_SCLK_OFS ... (SKL_SCLKFS_OFS - 1):
47 return SKL_SCLK;
48
49 case SKL_SCLKFS_OFS ... (SKL_MAX_CLK_CNT - 1):
50 return SKL_SCLK_FS;
51
52 default:
53 return -EINVAL;
54 }
55}
56
57static int skl_get_vbus_id(u32 index, u8 clk_type)
58{
59 switch (clk_type) {
60 case SKL_MCLK:
61 return index;
62
63 case SKL_SCLK:
64 return index - SKL_SCLK_OFS;
65
66 case SKL_SCLK_FS:
67 return index - SKL_SCLKFS_OFS;
68
69 default:
70 return -EINVAL;
71 }
72}
73
74static void skl_fill_clk_ipc(struct skl_clk_rate_cfg_table *rcfg, u8 clk_type)
75{
76 struct nhlt_fmt_cfg *fmt_cfg;
77 union skl_clk_ctrl_ipc *ipc;
78 struct wav_fmt *wfmt;
79
80 if (!rcfg)
81 return;
82
83 ipc = &rcfg->dma_ctl_ipc;
84 if (clk_type == SKL_SCLK_FS) {
85 fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
86 wfmt = &fmt_cfg->fmt_ext.fmt;
87
88
89 ipc->sclk_fs.hdr.size = sizeof(struct skl_dmactrl_sclkfs_cfg) -
90 sizeof(struct skl_tlv_hdr);
91 ipc->sclk_fs.sampling_frequency = wfmt->samples_per_sec;
92 ipc->sclk_fs.bit_depth = wfmt->bits_per_sample;
93 ipc->sclk_fs.valid_bit_depth =
94 fmt_cfg->fmt_ext.sample.valid_bits_per_sample;
95 ipc->sclk_fs.number_of_channels = wfmt->channels;
96 } else {
97 ipc->mclk.hdr.type = DMA_CLK_CONTROLS;
98
99 ipc->mclk.hdr.size = sizeof(struct skl_dmactrl_mclk_cfg) -
100 sizeof(struct skl_tlv_hdr);
101 }
102}
103
104
/*
 * Build and send the DMA-control IPC that enables/disables a clock.
 *
 * The message sent to the DSP is the NHLT endpoint blob for this rate
 * entry followed by the clock control TLV prepared by
 * skl_fill_clk_ipc(). Returns 0 on success or a negative errno
 * (-EIO for a missing rate config, -ENOMEM on allocation failure, or
 * the skl_dsp_set_dma_control() result).
 */
static int skl_send_clk_dma_control(struct skl_dev *skl,
				struct skl_clk_rate_cfg_table *rcfg,
				u32 vbus_id, u8 clk_type,
				bool enable)
{
	struct nhlt_specific_cfg *sp_cfg;
	u32 i2s_config_size, node_id = 0;
	struct nhlt_fmt_cfg *fmt_cfg;
	union skl_clk_ctrl_ipc *ipc;
	void *i2s_config = NULL;
	u8 *data, size;
	int ret;

	if (!rcfg)
		return -EIO;

	ipc = &rcfg->dma_ctl_ipc;
	fmt_cfg = (struct nhlt_fmt_cfg *)rcfg->config;
	sp_cfg = &fmt_cfg->config;

	if (clk_type == SKL_SCLK_FS) {
		/* start/stop is encoded in the TLV header type */
		ipc->sclk_fs.hdr.type =
			enable ? DMA_TRANSMITION_START : DMA_TRANSMITION_STOP;
		data = (u8 *)&ipc->sclk_fs;
		size = sizeof(struct skl_dmactrl_sclkfs_cfg);
	} else {
		/* mclk flag selects MCLK (1) vs SCLK (0) control */
		if (clk_type == SKL_SCLK)
			ipc->mclk.mclk = 0;
		else
			ipc->mclk.mclk = 1;

		ipc->mclk.keep_running = enable;
		ipc->mclk.warm_up_over = enable;
		ipc->mclk.clk_stop_over = !enable;
		data = (u8 *)&ipc->mclk;
		size = sizeof(struct skl_dmactrl_mclk_cfg);
	}

	i2s_config_size = sp_cfg->size + size;
	i2s_config = kzalloc(i2s_config_size, GFP_KERNEL);
	if (!i2s_config)
		return -ENOMEM;

	/* copy the NHLT endpoint blob first ... */
	memcpy(i2s_config, sp_cfg->caps, sp_cfg->size);

	/* ... then append the clock control TLV */
	memcpy(i2s_config + sp_cfg->size, data, size);

	/* node id addresses the I2S link instance for this SSP port */
	node_id = ((SKL_DMA_I2S_LINK_INPUT_CLASS << 8) | (vbus_id << 4));
	ret = skl_dsp_set_dma_control(skl, (u32 *)i2s_config,
					i2s_config_size, node_id);
	kfree(i2s_config);

	return ret;
}
162
163static struct skl_clk_rate_cfg_table *skl_get_rate_cfg(
164 struct skl_clk_rate_cfg_table *rcfg,
165 unsigned long rate)
166{
167 int i;
168
169 for (i = 0; (i < SKL_MAX_CLK_RATES) && rcfg[i].rate; i++) {
170 if (rcfg[i].rate == rate)
171 return &rcfg[i];
172 }
173
174 return NULL;
175}
176
177static int skl_clk_change_status(struct skl_clk *clkdev,
178 bool enable)
179{
180 struct skl_clk_rate_cfg_table *rcfg;
181 int vbus_id, clk_type;
182
183 clk_type = skl_get_clk_type(clkdev->id);
184 if (clk_type < 0)
185 return clk_type;
186
187 vbus_id = skl_get_vbus_id(clkdev->id, clk_type);
188 if (vbus_id < 0)
189 return vbus_id;
190
191 rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
192 clkdev->rate);
193 if (!rcfg)
194 return -EINVAL;
195
196 return skl_send_clk_dma_control(clkdev->pdata->pvt_data, rcfg,
197 vbus_id, clk_type, enable);
198}
199
200static int skl_clk_prepare(struct clk_hw *hw)
201{
202 struct skl_clk *clkdev = to_skl_clk(hw);
203
204 return skl_clk_change_status(clkdev, true);
205}
206
207static void skl_clk_unprepare(struct clk_hw *hw)
208{
209 struct skl_clk *clkdev = to_skl_clk(hw);
210
211 skl_clk_change_status(clkdev, false);
212}
213
214static int skl_clk_set_rate(struct clk_hw *hw, unsigned long rate,
215 unsigned long parent_rate)
216{
217 struct skl_clk *clkdev = to_skl_clk(hw);
218 struct skl_clk_rate_cfg_table *rcfg;
219 int clk_type;
220
221 if (!rate)
222 return -EINVAL;
223
224 rcfg = skl_get_rate_cfg(clkdev->pdata->ssp_clks[clkdev->id].rate_cfg,
225 rate);
226 if (!rcfg)
227 return -EINVAL;
228
229 clk_type = skl_get_clk_type(clkdev->id);
230 if (clk_type < 0)
231 return clk_type;
232
233 skl_fill_clk_ipc(rcfg, clk_type);
234 clkdev->rate = rate;
235
236 return 0;
237}
238
239static unsigned long skl_clk_recalc_rate(struct clk_hw *hw,
240 unsigned long parent_rate)
241{
242 struct skl_clk *clkdev = to_skl_clk(hw);
243
244 if (clkdev->rate)
245 return clkdev->rate;
246
247 return 0;
248}
249
250
/*
 * clk_ops.round_rate: there is no divider hardware to model here —
 * validity is checked against the NHLT table in set_rate, so every
 * requested rate is passed through unchanged.
 */
static long skl_clk_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
256
257
258
259
260
/*
 * clk_ops for the SSP clocks: rates are validated against the NHLT
 * rate tables, prepare/unprepare toggle the clock through DSP
 * DMA-control IPC.
 */
static const struct clk_ops skl_clk_ops = {
	.prepare = skl_clk_prepare,
	.unprepare = skl_clk_unprepare,
	.set_rate = skl_clk_set_rate,
	.round_rate = skl_clk_round_rate,
	.recalc_rate = skl_clk_recalc_rate,
};
268
269static void unregister_parent_src_clk(struct skl_clk_parent *pclk,
270 unsigned int id)
271{
272 while (id--) {
273 clkdev_drop(pclk[id].lookup);
274 clk_hw_unregister_fixed_rate(pclk[id].hw);
275 }
276}
277
278static void unregister_src_clk(struct skl_clk_data *dclk)
279{
280 while (dclk->avail_clk_cnt--)
281 clkdev_drop(dclk->clk[dclk->avail_clk_cnt]->lookup);
282}
283
284static int skl_register_parent_clks(struct device *dev,
285 struct skl_clk_parent *parent,
286 struct skl_clk_parent_src *pclk)
287{
288 int i, ret;
289
290 for (i = 0; i < SKL_MAX_CLK_SRC; i++) {
291
292
293 parent[i].hw = clk_hw_register_fixed_rate(dev, pclk[i].name,
294 pclk[i].parent_name, 0, pclk[i].rate);
295 if (IS_ERR(parent[i].hw)) {
296 ret = PTR_ERR(parent[i].hw);
297 goto err;
298 }
299
300 parent[i].lookup = clkdev_hw_create(parent[i].hw, pclk[i].name,
301 NULL);
302 if (!parent[i].lookup) {
303 clk_hw_unregister_fixed_rate(parent[i].hw);
304 ret = -ENOMEM;
305 goto err;
306 }
307 }
308
309 return 0;
310err:
311 unregister_parent_src_clk(parent, i);
312 return ret;
313}
314
315
316static struct skl_clk *register_skl_clk(struct device *dev,
317 struct skl_ssp_clk *clk,
318 struct skl_clk_pdata *clk_pdata, int id)
319{
320 struct clk_init_data init;
321 struct skl_clk *clkdev;
322 int ret;
323
324 clkdev = devm_kzalloc(dev, sizeof(*clkdev), GFP_KERNEL);
325 if (!clkdev)
326 return ERR_PTR(-ENOMEM);
327
328 init.name = clk->name;
329 init.ops = &skl_clk_ops;
330 init.flags = CLK_SET_RATE_GATE;
331 init.parent_names = &clk->parent_name;
332 init.num_parents = 1;
333 clkdev->hw.init = &init;
334 clkdev->pdata = clk_pdata;
335
336 clkdev->id = id;
337 ret = devm_clk_hw_register(dev, &clkdev->hw);
338 if (ret) {
339 clkdev = ERR_PTR(ret);
340 return clkdev;
341 }
342
343 clkdev->lookup = clkdev_hw_create(&clkdev->hw, init.name, NULL);
344 if (!clkdev->lookup)
345 clkdev = ERR_PTR(-ENOMEM);
346
347 return clkdev;
348}
349
350static int skl_clk_dev_probe(struct platform_device *pdev)
351{
352 struct device *dev = &pdev->dev;
353 struct device *parent_dev = dev->parent;
354 struct skl_clk_parent_src *parent_clks;
355 struct skl_clk_pdata *clk_pdata;
356 struct skl_clk_data *data;
357 struct skl_ssp_clk *clks;
358 int ret, i;
359
360 clk_pdata = dev_get_platdata(&pdev->dev);
361 parent_clks = clk_pdata->parent_clks;
362 clks = clk_pdata->ssp_clks;
363 if (!parent_clks || !clks)
364 return -EIO;
365
366 data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
367 if (!data)
368 return -ENOMEM;
369
370
371 ret = skl_register_parent_clks(parent_dev, data->parent, parent_clks);
372 if (ret < 0)
373 return ret;
374
375 for (i = 0; i < clk_pdata->num_clks; i++) {
376
377
378
379
380 if (clks[i].rate_cfg[0].rate == 0)
381 continue;
382
383 data->clk[data->avail_clk_cnt] = register_skl_clk(dev,
384 &clks[i], clk_pdata, i);
385
386 if (IS_ERR(data->clk[data->avail_clk_cnt])) {
387 ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
388 goto err_unreg_skl_clk;
389 }
390 }
391
392 platform_set_drvdata(pdev, data);
393
394 return 0;
395
396err_unreg_skl_clk:
397 unregister_src_clk(data);
398 unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
399
400 return ret;
401}
402
403static int skl_clk_dev_remove(struct platform_device *pdev)
404{
405 struct skl_clk_data *data;
406
407 data = platform_get_drvdata(pdev);
408 unregister_src_clk(data);
409 unregister_parent_src_clk(data->parent, SKL_MAX_CLK_SRC);
410
411 return 0;
412}
413
/* Platform driver bound by name to the "skl-ssp-clk" child device */
static struct platform_driver skl_clk_driver = {
	.driver = {
		.name = "skl-ssp-clk",
	},
	.probe = skl_clk_dev_probe,
	.remove = skl_clk_dev_remove,
};

module_platform_driver(skl_clk_driver);

MODULE_DESCRIPTION("Skylake clock driver");
MODULE_AUTHOR("Jaikrishna Nemallapudi <jaikrishnax.nemallapudi@intel.com>");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:skl-ssp-clk");
429