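// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel SST Firmware Loader
 */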
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pci.h>
#include <linux/acpi.h>

/* supported DMA engine drivers */
#include <linux/dma/dw.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include "sst-dsp.h"
#include "sst-dsp-priv.h"

#define SST_DMA_RESOURCES	2
#define SST_DSP_DMA_MAX_BURST	0x3
#define SST_HSW_BLOCK_ANY	0xffffffff

/* address bits OR'd into DSP-side addresses so the DMA engine targets LPE memory */
#define SST_HSW_MASK_DMA_ADDR_DSP 0xfff00000

/* DMA engine state attached to a DSP instance */
struct sst_dma {
	struct sst_dsp *sst;

	struct dw_dma_chip *chip;

	struct dma_async_tx_descriptor *desc;
	struct dma_chan *ch;
};

/*
 * Copy "bytes" from host memory into DSP I/O memory as 32-bit words,
 * packing any trailing bytes into a final padded word.
 */
static inline void sst_memcpy32(volatile void __iomem *dest, void *src, u32 bytes)
{
	u32 tmp = 0;
	int i, m, n;
	const u8 *src_byte = src;

	m = bytes / 4;
	n = bytes % 4;

	/* __iowrite32_copy copies in 4-byte units */
	__iowrite32_copy((void *)dest, src, m);

	if (n) {
		for (i = 0; i < n; i++)
			tmp |= (u32)*(src_byte + m * 4 + i) << (i * 8);
		__iowrite32_copy((void *)(dest + m * 4), &tmp, 1);
	}
}

static void sst_dma_transfer_complete(void *arg)
{
	struct sst_dsp *sst = (struct sst_dsp *)arg;

	dev_dbg(sst->dev, "DMA: callback\n");
}

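/*
 * Copy a buffer between host memory and DSP memory using the general
 * purpose DMA engine; blocks until the transfer descriptor completes.
 */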
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}

/* copy from host memory into DSP memory */
int sst_dsp_dma_copyto(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr | SST_HSW_MASK_DMA_ADDR_DSP,
			src_addr, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyto);

/* copy from DSP memory into host memory */
int sst_dsp_dma_copyfrom(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	return sst_dsp_dma_copy(sst, dest_addr,
			src_addr | SST_HSW_MASK_DMA_ADDR_DSP, size);
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_copyfrom);

/* return a module's blocks to the free list - caller must hold dsp->mutex */
static void block_list_remove(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int err;

	/* disable each block */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->disable) {
			err = block->ops->disable(block);
			if (err < 0)
				dev_err(dsp->dev,
					"error: can't disable block %d:%d\n",
					block->type, block->index);
		}
	}

	/* mark each block as free */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {
		list_del(&block->module_list);
		list_move(&block->list, &dsp->free_block_list);
		dev_dbg(dsp->dev, "block freed %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}
}

/* prepare the blocks to receive data - caller must hold dsp->mutex */
static int block_list_prepare(struct sst_dsp *dsp,
	struct list_head *block_list)
{
	struct sst_mem_block *block;
	int ret = 0;

	/* enable each block so that it's ready for data */
	list_for_each_entry(block, block_list, module_list) {

		if (block->ops && block->ops->enable && !block->users) {
			ret = block->ops->enable(block);
			if (ret < 0) {
				dev_err(dsp->dev,
					"error: can't enable block %d:%d\n",
					block->type, block->index);
				goto err;
			}
		}
	}
	return ret;

err:
	list_for_each_entry(block, block_list, module_list) {
		if (block->ops && block->ops->disable)
			block->ops->disable(block);
	}
	return ret;
}

static struct dw_dma_chip *dw_probe(struct device *dev, struct resource *mem,
	int irq)
{
	struct dw_dma_chip *chip;
	int err;

	chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return ERR_PTR(-ENOMEM);

	chip->irq = irq;
	chip->regs = devm_ioremap_resource(dev, mem);
	if (IS_ERR(chip->regs))
		return ERR_CAST(chip->regs);

	err = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(31));
	if (err)
		return ERR_PTR(err);

	chip->dev = dev;

	err = dw_dma_probe(chip);
	if (err)
		return ERR_PTR(err);

	return chip;
}

static void dw_remove(struct dw_dma_chip *chip)
{
	dw_dma_remove(chip);
}

static bool dma_chan_filter(struct dma_chan *chan, void *param)
{
	struct sst_dsp *dsp = (struct sst_dsp *)param;

	return chan->device->dev == dsp->dma_dev;
}

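/*
 * Request a memcpy-capable channel from the DMA controller attached to
 * this DSP and apply the slave configuration used for firmware transfers.
 */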
int sst_dsp_dma_get_channel(struct sst_dsp *dsp, int chan_id)
{
	struct sst_dma *dma = dsp->dma;
	struct dma_slave_config slave;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma->ch = dma_request_channel(mask, dma_chan_filter, dsp);
	if (dma->ch == NULL) {
		dev_err(dsp->dev, "error: DMA request channel failed\n");
		return -EIO;
	}

	memset(&slave, 0, sizeof(slave));
	slave.direction = DMA_MEM_TO_DEV;
	slave.src_addr_width =
		slave.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	slave.src_maxburst = slave.dst_maxburst = SST_DSP_DMA_MAX_BURST;

	ret = dmaengine_slave_config(dma->ch, &slave);
	if (ret) {
		dev_err(dsp->dev, "error: unable to set DMA slave config %d\n",
			ret);
		dma_release_channel(dma->ch);
		dma->ch = NULL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_get_channel);

void sst_dsp_dma_put_channel(struct sst_dsp *dsp)
{
	struct sst_dma *dma = dsp->dma;

	if (!dma->ch)
		return;

	dma_release_channel(dma->ch);
	dma->ch = NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_dma_put_channel);

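/*
 * Register the DMA controller attached to the ADSP and remember it so
 * firmware sections can be copied by DMA instead of memcpy.
 */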
int sst_dma_new(struct sst_dsp *sst)
{
	struct sst_pdata *sst_pdata = sst->pdata;
	struct sst_dma *dma;
	struct resource mem;
	int ret = 0;

	if (sst->pdata->resindex_dma_base == -1)
		/* DMA is not used, nothing to register */
		return 0;

	/* validate the requested DMA engine type; only DesignWare is supported */
	switch (sst->pdata->dma_engine) {
	case SST_DMA_TYPE_DW:
		break;
	default:
		dev_err(sst->dev, "error: invalid DMA engine %d\n",
			sst->pdata->dma_engine);
		return -EINVAL;
	}

	dma = devm_kzalloc(sst->dev, sizeof(struct sst_dma), GFP_KERNEL);
	if (!dma)
		return -ENOMEM;

	dma->sst = sst;

	memset(&mem, 0, sizeof(mem));

	mem.start = sst->addr.lpe_base + sst_pdata->dma_base;
	mem.end = sst->addr.lpe_base + sst_pdata->dma_base + sst_pdata->dma_size - 1;
	mem.flags = IORESOURCE_MEM;

	/* now register the DMA engine device */
	dma->chip = dw_probe(sst->dma_dev, &mem, sst_pdata->irq);
	if (IS_ERR(dma->chip)) {
		dev_err(sst->dev, "error: DMA device register failed\n");
		ret = PTR_ERR(dma->chip);
		goto err_dma_dev;
	}

	sst->dma = dma;
	sst->fw_use_dma = true;
	return 0;

err_dma_dev:
	devm_kfree(sst->dev, dma);
	return ret;
}
EXPORT_SYMBOL(sst_dma_new);

void sst_dma_free(struct sst_dma *dma)
{
	if (dma == NULL)
		return;

	if (dma->ch)
		dma_release_channel(dma->ch);

	if (dma->chip)
		dw_remove(dma->chip);
}
EXPORT_SYMBOL(sst_dma_free);

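/*
 * Create a new firmware image object: copy the request_firmware() data into
 * a DMA-able buffer and hand it to the platform parse_fw() callback.
 */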
struct sst_fw *sst_fw_new(struct sst_dsp *dsp,
	const struct firmware *fw, void *private)
{
	struct sst_fw *sst_fw;
	int err;

	if (!dsp->ops->parse_fw)
		return NULL;

	sst_fw = kzalloc(sizeof(*sst_fw), GFP_KERNEL);
	if (sst_fw == NULL)
		return NULL;

	sst_fw->dsp = dsp;
	sst_fw->private = private;
	sst_fw->size = fw->size;

	/* allocate DMA buffer to store FW data */
	sst_fw->dma_buf = dma_alloc_coherent(dsp->dma_dev, sst_fw->size,
				&sst_fw->dmable_fw_paddr, GFP_DMA | GFP_KERNEL);
	if (!sst_fw->dma_buf) {
		dev_err(dsp->dev, "error: DMA alloc failed\n");
		kfree(sst_fw);
		return NULL;
	}

	/* copy FW data to DMA-able memory */
	memcpy((void *)sst_fw->dma_buf, (void *)fw->data, fw->size);

	if (dsp->fw_use_dma) {
		err = sst_dsp_dma_get_channel(dsp, 0);
		if (err < 0)
			goto chan_err;
	}

	/* call core specific FW parser to load FW data into the DSP */
	err = dsp->ops->parse_fw(sst_fw);
	if (err < 0) {
		dev_err(dsp->dev, "error: parse fw failed %d\n", err);
		goto parse_err;
	}

	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);

	mutex_lock(&dsp->mutex);
	list_add(&sst_fw->list, &dsp->fw_list);
	mutex_unlock(&dsp->mutex);

	return sst_fw;

parse_err:
	if (dsp->fw_use_dma)
		sst_dsp_dma_put_channel(dsp);
chan_err:
	dma_free_coherent(dsp->dma_dev, sst_fw->size,
			sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	sst_fw->dma_buf = NULL;
	kfree(sst_fw);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_fw_new);

int sst_fw_reload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	int ret;

	dev_dbg(dsp->dev, "reloading firmware\n");

	/* call core specific FW parser to reload FW data into the DSP */
	ret = dsp->ops->parse_fw(sst_fw);
	if (ret < 0)
		dev_err(dsp->dev, "error: parse fw failed %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(sst_fw_reload);

void sst_fw_unload(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *module, *mtmp;
	struct sst_module_runtime *runtime, *rtmp;

	dev_dbg(dsp->dev, "unloading firmware\n");

	mutex_lock(&dsp->mutex);

	/* check module by module */
	list_for_each_entry_safe(module, mtmp, &dsp->module_list, list) {
		if (module->sst_fw == sst_fw) {

			/* remove runtime modules */
			list_for_each_entry_safe(runtime, rtmp, &module->runtime_list, list) {

				block_list_remove(dsp, &runtime->block_list);
				list_del(&runtime->list);
				kfree(runtime);
			}

			/* now remove the module */
			block_list_remove(dsp, &module->block_list);
			list_del(&module->list);
			kfree(module);
		}
	}

	/* remove all scratch blocks */
	block_list_remove(dsp, &dsp->scratch_block_list);

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_unload);

void sst_fw_free(struct sst_fw *sst_fw)
{
	struct sst_dsp *dsp = sst_fw->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_fw->list);
	mutex_unlock(&dsp->mutex);

	if (sst_fw->dma_buf)
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
	kfree(sst_fw);
}
EXPORT_SYMBOL_GPL(sst_fw_free);

void sst_fw_free_all(struct sst_dsp *dsp)
{
	struct sst_fw *sst_fw, *t;

	mutex_lock(&dsp->mutex);
	list_for_each_entry_safe(sst_fw, t, &dsp->fw_list, list) {

		list_del(&sst_fw->list);
		/* free with the same device that allocated the buffer */
		dma_free_coherent(dsp->dma_dev, sst_fw->size, sst_fw->dma_buf,
			sst_fw->dmable_fw_paddr);
		kfree(sst_fw);
	}
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_fw_free_all);

struct sst_module *sst_module_new(struct sst_fw *sst_fw,
	struct sst_module_template *template, void *private)
{
	struct sst_dsp *dsp = sst_fw->dsp;
	struct sst_module *sst_module;

	sst_module = kzalloc(sizeof(*sst_module), GFP_KERNEL);
	if (sst_module == NULL)
		return NULL;

	sst_module->id = template->id;
	sst_module->dsp = dsp;
	sst_module->sst_fw = sst_fw;
	sst_module->scratch_size = template->scratch_size;
	sst_module->persistent_size = template->persistent_size;
	sst_module->entry = template->entry;
	sst_module->state = SST_MODULE_STATE_UNLOADED;

	INIT_LIST_HEAD(&sst_module->block_list);
	INIT_LIST_HEAD(&sst_module->runtime_list);

	mutex_lock(&dsp->mutex);
	list_add(&sst_module->list, &dsp->module_list);
	mutex_unlock(&dsp->mutex);

	return sst_module;
}
EXPORT_SYMBOL_GPL(sst_module_new);

void sst_module_free(struct sst_module *sst_module)
{
	struct sst_dsp *dsp = sst_module->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&sst_module->list);
	mutex_unlock(&dsp->mutex);

	kfree(sst_module);
}
EXPORT_SYMBOL_GPL(sst_module_free);

struct sst_module_runtime *sst_module_runtime_new(struct sst_module *module,
	int id, void *private)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_module_runtime *runtime;

	runtime = kzalloc(sizeof(*runtime), GFP_KERNEL);
	if (runtime == NULL)
		return NULL;

	runtime->id = id;
	runtime->dsp = dsp;
	runtime->module = module;
	INIT_LIST_HEAD(&runtime->block_list);

	mutex_lock(&dsp->mutex);
	list_add(&runtime->list, &module->runtime_list);
	mutex_unlock(&dsp->mutex);

	return runtime;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_new);

void sst_module_runtime_free(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	list_del(&runtime->list);
	mutex_unlock(&dsp->mutex);

	kfree(runtime);
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free);

/* find a free block matching the requested type and offset */
static struct sst_mem_block *find_block(struct sst_dsp *dsp,
	struct sst_block_allocator *ba)
{
	struct sst_mem_block *block;

	list_for_each_entry(block, &dsp->free_block_list, list) {
		if (block->type == ba->type && block->offset == ba->offset)
			return block;
	}

	return NULL;
}

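/*
 * Allocate a run of contiguous blocks starting at ba->offset. On failure
 * the partially claimed blocks are returned to the free list and the
 * allocator state is restored.
 */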
static int block_alloc_contiguous(struct sst_dsp *dsp,
	struct sst_block_allocator *ba, struct list_head *block_list)
{
	struct list_head tmp = LIST_HEAD_INIT(tmp);
	struct sst_mem_block *block;
	u32 block_start = SST_HSW_BLOCK_ANY;
	int size = ba->size, offset = ba->offset;

	while (ba->size > 0) {

		block = find_block(dsp, ba);
		if (!block) {
			list_splice(&tmp, &dsp->free_block_list);

			/* restore the original request */
			ba->size = size;
			ba->offset = offset;
			return -ENOMEM;
		}

		list_move_tail(&block->list, &tmp);
		ba->offset += block->size;
		ba->size -= block->size;
	}
	ba->size = size;
	ba->offset = offset;

	list_for_each_entry(block, &tmp, list) {

		if (block->offset < block_start)
			block_start = block->offset;

		list_add(&block->module_list, block_list);

		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
	}

	list_splice(&tmp, &dsp->used_block_list);
	return 0;
}

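/*
 * Allocate blocks for a section of unknown position: first look for a single
 * free block big enough, then fall back to building a contiguous run of
 * smaller blocks.
 */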
static int block_alloc(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	int ret = 0;

	if (ba->size == 0)
		return 0;

	/* find the first free whole block that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with the wrong type */
		if (block->type != ba->type)
			continue;

		if (ba->size > block->size)
			continue;

		ba->offset = block->offset;
		block->bytes_used = ba->size % block->size;
		list_add(&block->module_list, block_list);
		list_move(&block->list, &dsp->used_block_list);
		dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
			block->type, block->index, block->offset);
		return 0;
	}

	/* then try a run of multiple free blocks that can hold the module */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {

		/* ignore blocks with the wrong type */
		if (block->type != ba->type)
			continue;

		/* does the request span more than one block? */
		if (ba->size > block->size) {

			/* align ba to the block boundary */
			ba->offset = block->offset;

			ret = block_alloc_contiguous(dsp, ba, block_list);
			if (ret == 0)
				return ret;
		}
	}

	/* not enough free block space */
	return -ENOMEM;
}

int sst_alloc_blocks(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	int ret;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba->size, ba->offset, ba->type);

	mutex_lock(&dsp->mutex);

	ret = block_alloc(dsp, ba, block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc blocks %d\n", ret);
		goto out;
	}

	/* prepare the DSP blocks for use */
	ret = block_list_prepare(dsp, block_list);
	if (ret < 0)
		dev_err(dsp->dev, "error: prepare failed\n");

out:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_alloc_blocks);

int sst_free_blocks(struct sst_dsp *dsp, struct list_head *block_list)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_free_blocks);

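/*
 * Allocate blocks for a section that must live at a fixed DSP offset,
 * extending any blocks already attached to the module before claiming new
 * ones from the free list.
 */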
static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba,
	struct list_head *block_list)
{
	struct sst_mem_block *block, *tmp;
	struct sst_block_allocator ba_tmp = *ba;
	u32 end = ba->offset + ba->size, block_end;
	int err;

	/* only IRAM/DRAM blocks are managed */
	if (ba->type != SST_MEM_IRAM && ba->type != SST_MEM_DRAM)
		return 0;

	/* are blocks already attached to this module? */
	list_for_each_entry_safe(block, tmp, block_list, module_list) {

		/* ignore blocks with the wrong type */
		if (block->type != ba->type)
			continue;

		block_end = block->offset + block->size;

		/* is the section fully within a block already in use? */
		if (ba->offset >= block->offset && end <= block_end)
			return 0;

		/* does the section spill past the end of this block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* align ba to the block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;
			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	/* find the first free block that can hold the section */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		block_end = block->offset + block->size;

		/* ignore blocks with the wrong type */
		if (block->type != ba->type)
			continue;

		/* find a block that holds the whole section */
		if (ba->offset >= block->offset && end <= block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			dev_dbg(dsp->dev, "block allocated %d:%d at offset 0x%x\n",
				block->type, block->index, block->offset);
			return 0;
		}

		/* does the section span more than one block? */
		if (ba->offset >= block->offset && ba->offset < block_end) {

			/* add block */
			list_move(&block->list, &dsp->used_block_list);
			list_add(&block->module_list, block_list);
			/* align ba to the block boundary */
			ba_tmp.size -= block_end - ba->offset;
			ba_tmp.offset = block_end;

			err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
			if (err < 0)
				return -ENOMEM;

			return 0;
		}
	}

	return -ENOMEM;
}

/* allocate blocks for a module's code/data and copy it into DSP memory */
int sst_module_alloc_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;
	struct sst_fw *sst_fw = module->sst_fw;
	struct sst_block_allocator ba;
	int ret;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->size;
	ba.type = module->type;
	ba.offset = module->offset;

	dev_dbg(dsp->dev, "block request 0x%x bytes at offset 0x%x type %d\n",
		ba.size, ba.offset, ba.type);

	mutex_lock(&dsp->mutex);

	/* alloc blocks that include this section */
	ret = block_alloc_fixed(dsp, &ba, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for section at offset 0x%x size 0x%x\n",
			module->offset, module->size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}

	/* prepare the DSP blocks for the module copy */
	ret = block_list_prepare(dsp, &module->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: fw module prepare failed\n");
		goto err;
	}

	/* copy the module data into the blocks */
	if (dsp->fw_use_dma) {
		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + module->offset,
			sst_fw->dmable_fw_paddr + module->data_offset,
			module->size);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else {
		sst_memcpy32(dsp->addr.lpe + module->offset, module->data,
			module->size);
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_alloc_blocks);

int sst_module_free_blocks(struct sst_module *module)
{
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &module->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_free_blocks);

int sst_module_runtime_alloc_blocks(struct sst_module_runtime *runtime,
	int offset)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	struct sst_block_allocator ba;
	int ret;

	if (module->persistent_size == 0)
		return 0;

	memset(&ba, 0, sizeof(ba));
	ba.size = module->persistent_size;
	ba.type = SST_MEM_DRAM;

	mutex_lock(&dsp->mutex);

	/* do we need to allocate at a fixed address? */
	if (offset != 0) {

		ba.offset = offset;

		dev_dbg(dsp->dev, "persistent fixed block request 0x%x bytes type %d offset 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &runtime->block_list);

	} else {
		dev_dbg(dsp->dev, "persistent block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		/* alloc blocks that include this section */
		ret = block_alloc(dsp, &ba, &runtime->block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev,
			"error: no free blocks for runtime module size 0x%x\n",
			module->persistent_size);
		mutex_unlock(&dsp->mutex);
		return -ENOMEM;
	}
	runtime->persistent_offset = ba.offset;

	/* prepare the DSP blocks for runtime usage */
	ret = block_list_prepare(dsp, &runtime->block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: runtime block prepare failed\n");
		goto err;
	}

	mutex_unlock(&dsp->mutex);
	return ret;

err:
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_alloc_blocks);

int sst_module_runtime_free_blocks(struct sst_module_runtime *runtime)
{
	struct sst_dsp *dsp = runtime->dsp;

	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &runtime->block_list);
	mutex_unlock(&dsp->mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_free_blocks);

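/*
 * Save a runtime's persistent DSP memory into a freshly allocated DMA
 * buffer so it can be restored after the DSP is powered down.
 */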
int sst_module_runtime_save(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "saving runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	context->buffer = dma_alloc_coherent(dsp->dma_dev,
		module->persistent_size,
		&context->dma_buffer, GFP_DMA | GFP_KERNEL);
	if (!context->buffer) {
		dev_err(dsp->dev, "error: DMA context alloc failed\n");
		return -ENOMEM;
	}

	mutex_lock(&dsp->mutex);

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyfrom(dsp, context->dma_buffer,
			dsp->addr.lpe_base + runtime->persistent_offset,
			module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: context copy failed\n");
			goto err;
		}
	} else {
		sst_memcpy32(context->buffer, dsp->addr.lpe +
			runtime->persistent_offset,
			module->persistent_size);
	}

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_save);

int sst_module_runtime_restore(struct sst_module_runtime *runtime,
	struct sst_module_runtime_context *context)
{
	struct sst_dsp *dsp = runtime->dsp;
	struct sst_module *module = runtime->module;
	int ret = 0;

	dev_dbg(dsp->dev, "restoring runtime %d memory at 0x%x size 0x%x\n",
		runtime->id, runtime->persistent_offset,
		module->persistent_size);

	mutex_lock(&dsp->mutex);

	if (!context->buffer) {
		dev_info(dsp->dev, "no context buffer to restore\n");
		goto err;
	}

	if (dsp->fw_use_dma) {

		ret = sst_dsp_dma_get_channel(dsp, 0);
		if (ret < 0)
			goto err;

		ret = sst_dsp_dma_copyto(dsp,
			dsp->addr.lpe_base + runtime->persistent_offset,
			context->dma_buffer, module->persistent_size);
		sst_dsp_dma_put_channel(dsp);
		if (ret < 0) {
			dev_err(dsp->dev, "error: module copy failed\n");
			goto err;
		}
	} else {
		sst_memcpy32(dsp->addr.lpe + runtime->persistent_offset,
			context->buffer, module->persistent_size);
	}

	dma_free_coherent(dsp->dma_dev, module->persistent_size,
		context->buffer, context->dma_buffer);
	context->buffer = NULL;

err:
	mutex_unlock(&dsp->mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_restore);

/* register a DSP memory block for use with FW based modules */
struct sst_mem_block *sst_mem_block_register(struct sst_dsp *dsp, u32 offset,
	u32 size, enum sst_mem_type type, const struct sst_block_ops *ops,
	u32 index, void *private)
{
	struct sst_mem_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block == NULL)
		return NULL;

	block->offset = offset;
	block->size = size;
	block->index = index;
	block->type = type;
	block->dsp = dsp;
	block->private = private;
	block->ops = ops;

	mutex_lock(&dsp->mutex);
	list_add(&block->list, &dsp->free_block_list);
	mutex_unlock(&dsp->mutex);

	return block;
}
EXPORT_SYMBOL_GPL(sst_mem_block_register);

/* unregister all DSP memory blocks */
void sst_mem_block_unregister_all(struct sst_dsp *dsp)
{
	struct sst_mem_block *block, *tmp;

	mutex_lock(&dsp->mutex);

	/* unregister used blocks */
	list_for_each_entry_safe(block, tmp, &dsp->used_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	/* unregister free blocks */
	list_for_each_entry_safe(block, tmp, &dsp->free_block_list, list) {
		list_del(&block->list);
		kfree(block);
	}

	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_mem_block_unregister_all);

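/*
 * Allocate scratch buffer blocks large enough for the biggest per-module
 * scratch requirement; all modules share this single scratch region.
 */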
int sst_block_alloc_scratch(struct sst_dsp *dsp)
{
	struct sst_module *module;
	struct sst_block_allocator ba;
	int ret;

	mutex_lock(&dsp->mutex);

	/* calculate the required scratch size */
	dsp->scratch_size = 0;
	list_for_each_entry(module, &dsp->module_list, list) {
		dev_dbg(dsp->dev, "module %d scratch req 0x%x bytes\n",
			module->id, module->scratch_size);
		if (dsp->scratch_size < module->scratch_size)
			dsp->scratch_size = module->scratch_size;
	}

	dev_dbg(dsp->dev, "scratch buffer required is 0x%x bytes\n",
		dsp->scratch_size);

	if (dsp->scratch_size == 0) {
		dev_info(dsp->dev, "no modules need scratch buffer\n");
		mutex_unlock(&dsp->mutex);
		return 0;
	}

	/* allocate blocks for the module scratch buffers */
	dev_dbg(dsp->dev, "allocating scratch blocks\n");

	ba.size = dsp->scratch_size;
	ba.type = SST_MEM_DRAM;

	/* do we need to allocate at a fixed offset? */
	if (dsp->scratch_offset != 0) {

		ba.offset = dsp->scratch_offset;

		dev_dbg(dsp->dev, "block request 0x%x bytes type %d at 0x%x\n",
			ba.size, ba.type, ba.offset);

		/* alloc blocks that include this section */
		ret = block_alloc_fixed(dsp, &ba, &dsp->scratch_block_list);

	} else {
		dev_dbg(dsp->dev, "block request 0x%x bytes type %d\n",
			ba.size, ba.type);

		ba.offset = 0;
		ret = block_alloc(dsp, &ba, &dsp->scratch_block_list);
	}
	if (ret < 0) {
		dev_err(dsp->dev, "error: can't alloc scratch blocks\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	ret = block_list_prepare(dsp, &dsp->scratch_block_list);
	if (ret < 0) {
		dev_err(dsp->dev, "error: scratch block prepare failed\n");
		mutex_unlock(&dsp->mutex);
		return ret;
	}

	/* all modules share the same scratch offset */
	dsp->scratch_offset = ba.offset;
	mutex_unlock(&dsp->mutex);
	return dsp->scratch_size;
}
EXPORT_SYMBOL_GPL(sst_block_alloc_scratch);

/* free all scratch blocks */
void sst_block_free_scratch(struct sst_dsp *dsp)
{
	mutex_lock(&dsp->mutex);
	block_list_remove(dsp, &dsp->scratch_block_list);
	mutex_unlock(&dsp->mutex);
}
EXPORT_SYMBOL_GPL(sst_block_free_scratch);

struct sst_module *sst_module_get_from_id(struct sst_dsp *dsp, u32 id)
{
	struct sst_module *module;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(module, &dsp->module_list, list) {
		if (module->id == id) {
			mutex_unlock(&dsp->mutex);
			return module;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_get_from_id);

struct sst_module_runtime *sst_module_runtime_get_from_id(
	struct sst_module *module, u32 id)
{
	struct sst_module_runtime *runtime;
	struct sst_dsp *dsp = module->dsp;

	mutex_lock(&dsp->mutex);

	list_for_each_entry(runtime, &module->runtime_list, list) {
		if (runtime->id == id) {
			mutex_unlock(&dsp->mutex);
			return runtime;
		}
	}

	mutex_unlock(&dsp->mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(sst_module_runtime_get_from_id);

/* convert a host-side memory offset into the DSP's internal address space */
u32 sst_dsp_get_offset(struct sst_dsp *dsp, u32 offset,
	enum sst_mem_type type)
{
	switch (type) {
	case SST_MEM_IRAM:
		return offset - dsp->addr.iram_offset +
			dsp->addr.dsp_iram_offset;
	case SST_MEM_DRAM:
		return offset - dsp->addr.dram_offset +
			dsp->addr.dsp_dram_offset;
	default:
		return 0;
	}
}
EXPORT_SYMBOL_GPL(sst_dsp_get_offset);

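/*
 * Create and initialise a DSP instance: set up locking and lists, run the
 * platform init callback, install the IRQ handler and register the DMA
 * engine used for firmware loading.
 */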
struct sst_dsp *sst_dsp_new(struct device *dev,
	struct sst_dsp_device *sst_dev, struct sst_pdata *pdata)
{
	struct sst_dsp *sst;
	int err;

	dev_dbg(dev, "initialising audio DSP id 0x%x\n", pdata->id);

	sst = devm_kzalloc(dev, sizeof(*sst), GFP_KERNEL);
	if (sst == NULL)
		return NULL;

	spin_lock_init(&sst->spinlock);
	mutex_init(&sst->mutex);
	sst->dev = dev;
	sst->dma_dev = pdata->dma_dev;
	sst->thread_context = sst_dev->thread_context;
	sst->sst_dev = sst_dev;
	sst->id = pdata->id;
	sst->irq = pdata->irq;
	sst->ops = sst_dev->ops;
	sst->pdata = pdata;
	INIT_LIST_HEAD(&sst->used_block_list);
	INIT_LIST_HEAD(&sst->free_block_list);
	INIT_LIST_HEAD(&sst->module_list);
	INIT_LIST_HEAD(&sst->fw_list);
	INIT_LIST_HEAD(&sst->scratch_block_list);

	/* initialise the SST audio DSP */
	if (sst->ops->init) {
		err = sst->ops->init(sst, pdata);
		if (err < 0)
			return NULL;
	}

	/* register the ISR */
	err = request_threaded_irq(sst->irq, sst->ops->irq_handler,
		sst_dev->thread, IRQF_SHARED, "AudioDSP", sst);
	if (err)
		goto irq_err;

	err = sst_dma_new(sst);
	if (err)
		dev_warn(dev, "sst_dma_new failed %d\n", err);

	return sst;

irq_err:
	if (sst->ops->free)
		sst->ops->free(sst);

	return NULL;
}
EXPORT_SYMBOL_GPL(sst_dsp_new);

void sst_dsp_free(struct sst_dsp *sst)
{
	free_irq(sst->irq, sst);
	if (sst->ops->free)
		sst->ops->free(sst);

	sst_dma_free(sst->dma);
}
EXPORT_SYMBOL_GPL(sst_dsp_free);

MODULE_DESCRIPTION("Intel SST Firmware Loader");
MODULE_LICENSE("GPL v2");