// SPDX-License-Identifier: GPL-2.0-only
/*
 * DSP library functions for the Intel Cannonlake (CNL) audio platform.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/device.h>

#include "../common/sst-dsp.h"
#include "../common/sst-dsp-priv.h"
#include "../common/sst-ipc.h"
#include "cnl-sst-dsp.h"
#include "skl.h"

#define CNL_FW_ROM_INIT		0x1
#define CNL_FW_INIT		0x5
#define CNL_IPC_PURGE		0x01004000
#define CNL_INIT_TIMEOUT	300
#define CNL_BASEFW_TIMEOUT	3000

#define CNL_ADSP_SRAM0_BASE	0x80000

#define CNL_ADSP_FW_STATUS	CNL_ADSP_SRAM0_BASE
#define CNL_ADSP_ERROR_CODE	(CNL_ADSP_FW_STATUS + 0x4)

#define CNL_INSTANCE_ID		0
#define CNL_BASE_FW_MODULE_ID	0
#define CNL_ADSP_FW_HDR_OFFSET	0x2000
#define CNL_ROM_CTRL_DMA_ID	0x9

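/*
 * Copy the base firmware image into a host DMA buffer, power up and start
 * DSP core 0, issue the IPC purge request and wait for the ROM to report
 * that it is ready to receive the firmware.
 */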
static int cnl_prepare_fw(struct sst_dsp *ctx, const void *fwdata, u32 fwsize)
{
	int ret, stream_tag;

	stream_tag = ctx->dsp_ops.prepare(ctx->dev, 0x40, fwsize, &ctx->dmab);
	if (stream_tag <= 0) {
		dev_err(ctx->dev, "dma prepare failed: %#x\n", stream_tag);
		return stream_tag;
	}

	ctx->dsp_ops.stream_tag = stream_tag;
	memcpy(ctx->dmab.area, fwdata, fwsize);

	ret = skl_dsp_core_power_up(ctx, SKL_DSP_CORE0_MASK);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core0 power up failed\n");
		ret = -EIO;
		goto base_fw_load_failed;
	}

	/* purge FW request */
	sst_dsp_shim_write(ctx, CNL_ADSP_REG_HIPCIDR,
			   CNL_ADSP_REG_HIPCIDR_BUSY | (CNL_IPC_PURGE |
			   ((stream_tag - 1) << CNL_ROM_CTRL_DMA_ID)));

	ret = skl_dsp_start_core(ctx, SKL_DSP_CORE0_MASK);
	if (ret < 0) {
		dev_err(ctx->dev, "dsp core0 start failed: %d\n", ret);
		ret = -EIO;
		goto base_fw_load_failed;
	}

	ret = sst_dsp_register_poll(ctx, CNL_ADSP_REG_HIPCIDA,
				    CNL_ADSP_REG_HIPCIDA_DONE,
				    CNL_ADSP_REG_HIPCIDA_DONE,
				    BXT_INIT_TIMEOUT, "HIPCIDA Done");
	if (ret < 0) {
		dev_err(ctx->dev, "timeout for purge request: %d\n", ret);
		goto base_fw_load_failed;
	}

	/* enable interrupt */
	cnl_ipc_int_enable(ctx);
	cnl_ipc_op_int_enable(ctx);

	ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
				    CNL_FW_ROM_INIT, CNL_INIT_TIMEOUT,
				    "rom load");
	if (ret < 0) {
		dev_err(ctx->dev, "rom init timeout: %d\n", ret);
		goto base_fw_load_failed;
	}

	return 0;

base_fw_load_failed:
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, stream_tag);
	cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);

	return ret;
}

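/*
 * Trigger the host DMA stream carrying the firmware image and poll the
 * firmware status register until the base firmware reports FW_INIT, then
 * stop the stream and release the DMA buffer.
 */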
static int sst_transfer_fw_host_dma(struct sst_dsp *ctx)
{
	int ret;

	ctx->dsp_ops.trigger(ctx->dev, true, ctx->dsp_ops.stream_tag);
	ret = sst_dsp_register_poll(ctx, CNL_ADSP_FW_STATUS, CNL_FW_STS_MASK,
				    CNL_FW_INIT, CNL_BASEFW_TIMEOUT,
				    "firmware boot");

	ctx->dsp_ops.trigger(ctx->dev, false, ctx->dsp_ops.stream_tag);
	ctx->dsp_ops.cleanup(ctx->dev, &ctx->dmab, ctx->dsp_ops.stream_tag);

	return ret;
}

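/*
 * Request the base firmware image, parse its module UUIDs on first boot,
 * strip the extended manifest and download the image to the DSP, retrying
 * the ROM init step a few times, then wait for the FW ready notification.
 */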
static int cnl_load_base_firmware(struct sst_dsp *ctx)
{
	struct firmware stripped_fw;
	struct skl_dev *cnl = ctx->thread_context;
	int ret, i;

	if (!ctx->fw) {
		ret = request_firmware(&ctx->fw, ctx->fw_name, ctx->dev);
		if (ret < 0) {
			dev_err(ctx->dev, "request firmware failed: %d\n", ret);
			goto cnl_load_base_firmware_failed;
		}
	}

	/* parse uuids on first boot */
	if (cnl->is_first_boot) {
		ret = snd_skl_parse_uuids(ctx, ctx->fw,
					  CNL_ADSP_FW_HDR_OFFSET, 0);
		if (ret < 0)
			goto cnl_load_base_firmware_failed;
	}

	stripped_fw.data = ctx->fw->data;
	stripped_fw.size = ctx->fw->size;
	skl_dsp_strip_extended_manifest(&stripped_fw);

	for (i = 0; i < BXT_FW_ROM_INIT_RETRY; i++) {
		ret = cnl_prepare_fw(ctx, stripped_fw.data, stripped_fw.size);
		if (!ret)
			break;
		dev_dbg(ctx->dev, "prepare firmware failed: %d\n", ret);
	}

	if (ret < 0)
		goto cnl_load_base_firmware_failed;

	ret = sst_transfer_fw_host_dma(ctx);
	if (ret < 0) {
		dev_err(ctx->dev, "transfer firmware failed: %d\n", ret);
		cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		goto cnl_load_base_firmware_failed;
	}

	ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
				 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
	if (ret == 0) {
		dev_err(ctx->dev, "FW ready timed-out\n");
		cnl_dsp_disable_core(ctx, SKL_DSP_CORE0_MASK);
		ret = -EIO;
		goto cnl_load_base_firmware_failed;
	}

	cnl->fw_loaded = true;

	return 0;

cnl_load_base_firmware_failed:
	dev_err(ctx->dev, "firmware load failed: %d\n", ret);
	release_firmware(ctx->fw);
	ctx->fw = NULL;

	return ret;
}

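/*
 * Bring a DSP core to D0. If the base firmware is not loaded, reload it.
 * For core 0 the function waits for the firmware boot notification; other
 * cores are powered up through a set_dx IPC to the base firmware.
 */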
static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *cnl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
	struct skl_ipc_dxstate_info dx;
	int ret;

	if (!cnl->fw_loaded) {
		cnl->boot_complete = false;
		ret = cnl_load_base_firmware(ctx);
		if (ret < 0) {
			dev_err(ctx->dev, "fw reload failed: %d\n", ret);
			return ret;
		}

		cnl->cores.state[core_id] = SKL_DSP_RUNNING;
		return ret;
	}

	ret = cnl_dsp_enable_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "enable dsp core %d failed: %d\n",
			core_id, ret);
		goto err;
	}

	if (core_id == SKL_DSP_CORE0_ID) {
		/* enable interrupt */
		cnl_ipc_int_enable(ctx);
		cnl_ipc_op_int_enable(ctx);
		cnl->boot_complete = false;

		ret = wait_event_timeout(cnl->boot_wait, cnl->boot_complete,
					 msecs_to_jiffies(SKL_IPC_BOOT_MSECS));
		if (ret == 0) {
			dev_err(ctx->dev,
				"dsp boot timeout, status=%#x error=%#x\n",
				sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
				sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
			ret = -ETIMEDOUT;
			goto err;
		}
	} else {
		dx.core_mask = core_mask;
		dx.dx_mask = core_mask;

		ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
				     CNL_BASE_FW_MODULE_ID, &dx);
		if (ret < 0) {
			dev_err(ctx->dev, "set_dx failed, core: %d ret: %d\n",
				core_id, ret);
			goto err;
		}
	}
	cnl->cores.state[core_id] = SKL_DSP_RUNNING;

	return 0;
err:
	cnl_dsp_disable_core(ctx, core_mask);

	return ret;
}

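/*
 * Put a DSP core into D3: request the transition from the firmware via a
 * set_dx IPC, disable IPC interrupts when core 0 goes down, then reset and
 * power off the core.
 */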
static int cnl_set_dsp_D3(struct sst_dsp *ctx, unsigned int core_id)
{
	struct skl_dev *cnl = ctx->thread_context;
	unsigned int core_mask = SKL_DSP_CORE_MASK(core_id);
	struct skl_ipc_dxstate_info dx;
	int ret;

	dx.core_mask = core_mask;
	dx.dx_mask = SKL_IPC_D3_MASK;

	ret = skl_ipc_set_dx(&cnl->ipc, CNL_INSTANCE_ID,
			     CNL_BASE_FW_MODULE_ID, &dx);
	if (ret < 0) {
		dev_err(ctx->dev,
			"dsp core %d to d3 failed; continue reset\n",
			core_id);
		cnl->fw_loaded = false;
	}

	/* disable interrupts if core 0 */
	if (core_id == SKL_DSP_CORE0_ID) {
		skl_ipc_op_int_disable(ctx);
		skl_ipc_int_disable(ctx);
	}

	ret = cnl_dsp_disable_core(ctx, core_mask);
	if (ret < 0) {
		dev_err(ctx->dev, "disable dsp core %d failed: %d\n",
			core_id, ret);
		return ret;
	}

	cnl->cores.state[core_id] = SKL_DSP_RESET;

	return ret;
}

static unsigned int cnl_get_errno(struct sst_dsp *ctx)
{
	return sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE);
}

static const struct skl_dsp_fw_ops cnl_fw_ops = {
	.set_state_D0 = cnl_set_dsp_D0,
	.set_state_D3 = cnl_set_dsp_D3,
	.load_fw = cnl_load_base_firmware,
	.get_fw_errcode = cnl_get_errno,
};

static struct sst_ops cnl_ops = {
	.irq_handler = cnl_dsp_sst_interrupt,
	.write = sst_shim32_write,
	.read = sst_shim32_read,
	.ram_read = sst_memcpy_fromio_32,
	.ram_write = sst_memcpy_toio_32,
	.free = cnl_dsp_free,
};

#define CNL_IPC_GLB_NOTIFY_RSP_SHIFT	29
#define CNL_IPC_GLB_NOTIFY_RSP_MASK	0x1
#define CNL_IPC_GLB_NOTIFY_RSP_TYPE(x)	(((x) >> CNL_IPC_GLB_NOTIFY_RSP_SHIFT) \
					 & CNL_IPC_GLB_NOTIFY_RSP_MASK)

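/*
 * Threaded IPC interrupt handler: acknowledge a completed outbound message
 * (HIPCIDA done) and/or read an inbound message from the firmware (HIPCTDR
 * busy), dispatch it as a reply or a notification, then kick the IPC work
 * to send any queued messages.
 */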
static irqreturn_t cnl_dsp_irq_thread_handler(int irq, void *context)
{
	struct sst_dsp *dsp = context;
	struct skl_dev *cnl = sst_dsp_get_thread_context(dsp);
	struct sst_generic_ipc *ipc = &cnl->ipc;
	struct skl_ipc_header header = {0};
	u32 hipcida, hipctdr, hipctdd;
	int ipc_irq = 0;

	if (!(dsp->intr_status & CNL_ADSPIS_IPC))
		return IRQ_NONE;

	hipcida = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDA);
	hipctdr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDR);
	hipctdd = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCTDD);

	/* reply message from dsp */
	if (hipcida & CNL_ADSP_REG_HIPCIDA_DONE) {
		sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
					 CNL_ADSP_REG_HIPCCTL_DONE, 0);

		/* clear done bit - tell dsp the operation is complete */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCIDA,
			CNL_ADSP_REG_HIPCIDA_DONE, CNL_ADSP_REG_HIPCIDA_DONE);

		ipc_irq = 1;

		/* unmask done interrupt */
		sst_dsp_shim_update_bits(dsp, CNL_ADSP_REG_HIPCCTL,
			CNL_ADSP_REG_HIPCCTL_DONE, CNL_ADSP_REG_HIPCCTL_DONE);
	}

	/* new message from dsp */
	if (hipctdr & CNL_ADSP_REG_HIPCTDR_BUSY) {
		header.primary = hipctdr;
		header.extension = hipctdd;
		dev_dbg(dsp->dev, "IPC irq: firmware response, primary: %x\n",
			header.primary);
		dev_dbg(dsp->dev, "IPC irq: firmware response, extension: %x\n",
			header.extension);

		if (CNL_IPC_GLB_NOTIFY_RSP_TYPE(header.primary)) {
			/* immediate reply from the dsp core */
			skl_ipc_process_reply(ipc, header);
		} else {
			dev_dbg(dsp->dev, "IPC irq: notification from firmware\n");
			skl_ipc_process_notification(ipc, header);
		}

		/* clear busy interrupt so the dsp does not trigger it again */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDR,
			CNL_ADSP_REG_HIPCTDR_BUSY, CNL_ADSP_REG_HIPCTDR_BUSY);

		/* set done bit to ack that the message has been processed */
		sst_dsp_shim_update_bits_forced(dsp, CNL_ADSP_REG_HIPCTDA,
			CNL_ADSP_REG_HIPCTDA_DONE, CNL_ADSP_REG_HIPCTDA_DONE);
		ipc_irq = 1;
	}

	if (ipc_irq == 0)
		return IRQ_NONE;

	cnl_ipc_int_enable(dsp);

	/* continue to send any remaining messages */
	schedule_work(&ipc->kwork);

	return IRQ_HANDLED;
}

static struct sst_dsp_device cnl_dev = {
	.thread = cnl_dsp_irq_thread_handler,
	.ops = &cnl_ops,
};

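/*
 * Send an IPC message to the DSP: copy the payload to the outbox, write the
 * header extension and then the primary word with the BUSY bit set to raise
 * the doorbell.
 */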
static void cnl_ipc_tx_msg(struct sst_generic_ipc *ipc, struct ipc_message *msg)
{
	struct skl_ipc_header *header = (struct skl_ipc_header *)(&msg->tx.header);

	if (msg->tx.size)
		sst_dsp_outbox_write(ipc->dsp, msg->tx.data, msg->tx.size);
	sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDD,
				    header->extension);
	sst_dsp_shim_write_unlocked(ipc->dsp, CNL_ADSP_REG_HIPCIDR,
				    header->primary | CNL_ADSP_REG_HIPCIDR_BUSY);
}

static bool cnl_ipc_is_dsp_busy(struct sst_dsp *dsp)
{
	u32 hipcidr;

	hipcidr = sst_dsp_shim_read_unlocked(dsp, CNL_ADSP_REG_HIPCIDR);

	return (hipcidr & CNL_ADSP_REG_HIPCIDR_BUSY);
}

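/*
 * Set up the generic IPC context for CNL: mailbox payload sizes and the
 * CNL-specific tx and busy-check callbacks.
 */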
static int cnl_ipc_init(struct device *dev, struct skl_dev *cnl)
{
	struct sst_generic_ipc *ipc;
	int err;

	ipc = &cnl->ipc;
	ipc->dsp = cnl->dsp;
	ipc->dev = dev;

	ipc->tx_data_max_size = CNL_ADSP_W1_SZ;
	ipc->rx_data_max_size = CNL_ADSP_W0_UP_SZ;

	err = sst_ipc_init(ipc);
	if (err)
		return err;

	/*
	 * Override tx_msg and is_dsp_busy since the IPC registers
	 * are different for CNL.
	 */
	ipc->ops.tx_msg = cnl_ipc_tx_msg;
	ipc->ops.tx_data_copy = skl_ipc_tx_data_copy;
	ipc->ops.is_dsp_busy = cnl_ipc_is_dsp_busy;

	return 0;
}

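/*
 * Create the SST context for the Cannonlake DSP: map the shim and SRAM
 * windows, initialize the mailbox and IPC layer and acquire the interrupt.
 */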
int cnl_sst_dsp_init(struct device *dev, void __iomem *mmio_base, int irq,
		     const char *fw_name, struct skl_dsp_loader_ops dsp_ops,
		     struct skl_dev **dsp)
{
	struct skl_dev *cnl;
	struct sst_dsp *sst;
	int ret;

	ret = skl_sst_ctx_init(dev, irq, fw_name, dsp_ops, dsp, &cnl_dev);
	if (ret < 0) {
		dev_err(dev, "%s: no device\n", __func__);
		return ret;
	}

	cnl = *dsp;
	sst = cnl->dsp;
	sst->fw_ops = cnl_fw_ops;
	sst->addr.lpe = mmio_base;
	sst->addr.shim = mmio_base;
	sst->addr.sram0_base = CNL_ADSP_SRAM0_BASE;
	sst->addr.sram1_base = CNL_ADSP_SRAM1_BASE;
	sst->addr.w0_stat_sz = CNL_ADSP_W0_STAT_SZ;
	sst->addr.w0_up_sz = CNL_ADSP_W0_UP_SZ;

	sst_dsp_mailbox_init(sst, (CNL_ADSP_SRAM0_BASE + CNL_ADSP_W0_STAT_SZ),
			     CNL_ADSP_W0_UP_SZ, CNL_ADSP_SRAM1_BASE,
			     CNL_ADSP_W1_SZ);

	ret = cnl_ipc_init(dev, cnl);
	if (ret) {
		skl_dsp_free(sst);
		return ret;
	}

	cnl->boot_complete = false;
	init_waitqueue_head(&cnl->boot_wait);

	return skl_dsp_acquire_irq(sst);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_init);

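/*
 * Download the base firmware and initialize the DSP core state bookkeeping;
 * clears is_first_boot on success.
 */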
int cnl_sst_init_fw(struct device *dev, struct skl_dev *skl)
{
	int ret;
	struct sst_dsp *sst = skl->dsp;

	ret = skl->dsp->fw_ops.load_fw(sst);
	if (ret < 0) {
		dev_err(dev, "load base fw failed: %d\n", ret);
		return ret;
	}

	skl_dsp_init_core_state(sst);

	skl->is_first_boot = false;

	return 0;
}
EXPORT_SYMBOL_GPL(cnl_sst_init_fw);

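/*
 * Release the firmware image, module UUID list and IPC resources, then free
 * the DSP context.
 */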
void cnl_sst_dsp_cleanup(struct device *dev, struct skl_dev *skl)
{
	if (skl->dsp->fw)
		release_firmware(skl->dsp->fw);

	skl_freeup_uuid_list(skl);
	cnl_ipc_free(&skl->ipc);

	skl->dsp->ops->free(skl->dsp);
}
EXPORT_SYMBOL_GPL(cnl_sst_dsp_cleanup);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Cannonlake IPC driver");