// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

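/* SDMA registers */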
#define SDMA_H_C0PTR 0x000
#define SDMA_H_INTR 0x004
#define SDMA_H_STATSTOP 0x008
#define SDMA_H_START 0x00c
#define SDMA_H_EVTOVR 0x010
#define SDMA_H_DSPOVR 0x014
#define SDMA_H_HOSTOVR 0x018
#define SDMA_H_EVTPEND 0x01c
#define SDMA_H_DSPENBL 0x020
#define SDMA_H_RESET 0x024
#define SDMA_H_EVTERR 0x028
#define SDMA_H_INTRMSK 0x02c
#define SDMA_H_PSW 0x030
#define SDMA_H_EVTERRDBG 0x034
#define SDMA_H_CONFIG 0x038
#define SDMA_ONCE_ENB 0x040
#define SDMA_ONCE_DATA 0x044
#define SDMA_ONCE_INSTR 0x048
#define SDMA_ONCE_STAT 0x04c
#define SDMA_ONCE_CMD 0x050
#define SDMA_EVT_MIRROR 0x054
#define SDMA_ILLINSTADDR 0x058
#define SDMA_CHN0ADDR 0x05c
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
#define SDMA_CHNENBL0_IMX35 0x200
#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100

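/* Buffer descriptor (BD) status bits */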
#define BD_DONE 0x01
#define BD_WRAP 0x02
#define BD_CONT 0x04
#define BD_INTR 0x08
#define BD_RROR 0x10
#define BD_LAST 0x20
#define BD_EXTD 0x80

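/* Data Node descriptor status bits */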
#define DND_END_OF_FRAME 0x80
#define DND_END_OF_XFER 0x40
#define DND_DONE 0x20
#define DND_UNUSED 0x01

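/* IPCv2 descriptor status bits */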
#define BD_IPCV2_END_OF_FRAME 0x40

#define IPCV2_MAX_NODES 50

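/* Error bit set in the CCB status field by the SDMA core */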
#define DATA_ERROR 0x10000000

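/* Buffer descriptor commands, used on control channel 0 */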
#define C0_ADDR 0x01
#define C0_LOAD 0x02
#define C0_DUMP 0x03
#define C0_SETCTX 0x07
#define C0_GETCTX 0x03
#define C0_SETDM 0x01
#define C0_SETPM 0x04
#define C0_GETDM 0x02
#define C0_GETPM 0x08

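/* Change endianness indicator in the BD command field */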
#define CHANGE_ENDIANNESS 0x80

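/*
 * Peripheral-to-peripheral (p2p) watermark level field layout:
 * bits 0-7:   lower watermark level (LWML)
 * bit  8:     PS
 * bit  9:     PA
 * bit  10:    SPDIF
 * bit  11:    source is on SPBA (SP)
 * bit  12:    destination is on SPBA (DP)
 * bits 16-23: higher watermark level (HWML)
 * bit  28:    lower watermark event (LWE)
 * bit  29:    higher watermark event (HWE)
 * bit  31:    continuous transfer (CONT)
 */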
#define SDMA_WATERMARK_LEVEL_LWML 0xFF
#define SDMA_WATERMARK_LEVEL_PS BIT(8)
#define SDMA_WATERMARK_LEVEL_PA BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)

#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
			    BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
			    BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
			     BIT(DMA_MEM_TO_DEV) | \
			     BIT(DMA_DEV_TO_DEV))

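/* Mode/count field of a buffer descriptor */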
struct sdma_mode_count {
#define SDMA_BD_MAX_CNT 0xffff
	u32 count : 16;
	u32 status : 8;
	u32 command : 8;
};

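/* Buffer descriptor as read by the SDMA engine */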
struct sdma_buffer_descriptor {
	struct sdma_mode_count mode;
	u32 buffer_addr;
	u32 ext_buffer_addr;
} __attribute__ ((packed));

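/* Channel control block (CCB): per-channel pointers to the current and base buffer descriptor */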
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

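/* SDMA core state registers (program counters and flags), part of a channel context */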
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

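/* Per-channel context as stored in SDMA internal RAM: state registers, general registers and functional unit state */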
struct sdma_context_data {
	struct sdma_state_registers channel_state;
	u32 gReg[8];
	u32 mda;
	u32 msa;
	u32 ms;
	u32 md;
	u32 pda;
	u32 psa;
	u32 ps;
	u32 pd;
	u32 ca;
	u32 cs;
	u32 dda;
	u32 dsa;
	u32 ds;
	u32 dd;
	u32 scratch0;
	u32 scratch1;
	u32 scratch2;
	u32 scratch3;
	u32 scratch4;
	u32 scratch5;
	u32 scratch6;
	u32 scratch7;
} __attribute__ ((packed));

struct sdma_engine;

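/* One transfer descriptor: the virt-dma descriptor plus the hardware buffer descriptor ring backing it */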
struct sdma_desc {
	struct virt_dma_desc vd;
	unsigned int num_bd;
	dma_addr_t bd_phys;
	unsigned int buf_tail;
	unsigned int buf_ptail;
	unsigned int period_len;
	unsigned int chn_real_count;
	unsigned int chn_count;
	struct sdma_channel *sdmac;
	struct sdma_buffer_descriptor *bd;
};

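/* Per-channel driver state, one instance for each of the 32 SDMA channels */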
struct sdma_channel {
	struct virt_dma_chan vc;
	struct sdma_desc *desc;
	struct sdma_engine *sdma;
	unsigned int channel;
	enum dma_transfer_direction direction;
	struct dma_slave_config slave_config;
	enum sdma_peripheral_type peripheral_type;
	unsigned int event_id0;
	unsigned int event_id1;
	enum dma_slave_buswidth word_size;
	unsigned int pc_from_device, pc_to_device;
	unsigned int device_to_device;
	unsigned int pc_to_pc;
	unsigned long flags;
	dma_addr_t per_address, per_address2;
	unsigned long event_mask[2];
	unsigned long watermark_level;
	u32 shp_addr, per_addr;
	enum dma_status status;
	bool context_loaded;
	struct imx_dma_data data;
	struct work_struct terminate_worker;
};

#define IMX_DMA_SG_LOOP BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

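/* Header of the external SDMA firmware image: offsets locate the script address table and the RAM code */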
struct sdma_firmware_header {
	u32 magic;
	u32 version_major;
	u32 version_minor;
	u32 script_addrs_start;
	u32 num_script_addrs;
	u32 ram_code_start;
	u32 ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs *script_addrs;
	bool check_ratio;
};

struct sdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct sdma_channel channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control *channel_control;
	void __iomem *regs;
	struct sdma_context_data *context;
	dma_addr_t context_phys;
	struct dma_device dma_device;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	spinlock_t channel_0_lock;
	u32 script_number;
	struct sdma_script_start_addrs *script_addrs;
	const struct sdma_driver_data *drvdata;
	u32 spba_start_addr;
	u32 spba_end_addr;
	unsigned int irq;
	dma_addr_t bd0_phys;
	struct sdma_buffer_descriptor *bd0;

	bool clk_ratio;
};

448
449static int sdma_config_write(struct dma_chan *chan,
450 struct dma_slave_config *dmaengine_cfg,
451 enum dma_transfer_direction direction);
452
453static struct sdma_driver_data sdma_imx31 = {
454 .chnenbl0 = SDMA_CHNENBL0_IMX31,
455 .num_events = 32,
456};
457
458static struct sdma_script_start_addrs sdma_script_imx25 = {
459 .ap_2_ap_addr = 729,
460 .uart_2_mcu_addr = 904,
461 .per_2_app_addr = 1255,
462 .mcu_2_app_addr = 834,
463 .uartsh_2_mcu_addr = 1120,
464 .per_2_shp_addr = 1329,
465 .mcu_2_shp_addr = 1048,
466 .ata_2_mcu_addr = 1560,
467 .mcu_2_ata_addr = 1479,
468 .app_2_per_addr = 1189,
469 .app_2_mcu_addr = 770,
470 .shp_2_per_addr = 1407,
471 .shp_2_mcu_addr = 979,
472};
473
474static struct sdma_driver_data sdma_imx25 = {
475 .chnenbl0 = SDMA_CHNENBL0_IMX35,
476 .num_events = 48,
477 .script_addrs = &sdma_script_imx25,
478};
479
480static struct sdma_driver_data sdma_imx35 = {
481 .chnenbl0 = SDMA_CHNENBL0_IMX35,
482 .num_events = 48,
483};
484
485static struct sdma_script_start_addrs sdma_script_imx51 = {
486 .ap_2_ap_addr = 642,
487 .uart_2_mcu_addr = 817,
488 .mcu_2_app_addr = 747,
489 .mcu_2_shp_addr = 961,
490 .ata_2_mcu_addr = 1473,
491 .mcu_2_ata_addr = 1392,
492 .app_2_per_addr = 1033,
493 .app_2_mcu_addr = 683,
494 .shp_2_per_addr = 1251,
495 .shp_2_mcu_addr = 892,
496};
497
498static struct sdma_driver_data sdma_imx51 = {
499 .chnenbl0 = SDMA_CHNENBL0_IMX35,
500 .num_events = 48,
501 .script_addrs = &sdma_script_imx51,
502};
503
504static struct sdma_script_start_addrs sdma_script_imx53 = {
505 .ap_2_ap_addr = 642,
506 .app_2_mcu_addr = 683,
507 .mcu_2_app_addr = 747,
508 .uart_2_mcu_addr = 817,
509 .shp_2_mcu_addr = 891,
510 .mcu_2_shp_addr = 960,
511 .uartsh_2_mcu_addr = 1032,
512 .spdif_2_mcu_addr = 1100,
513 .mcu_2_spdif_addr = 1134,
514 .firi_2_mcu_addr = 1193,
515 .mcu_2_firi_addr = 1290,
516};
517
518static struct sdma_driver_data sdma_imx53 = {
519 .chnenbl0 = SDMA_CHNENBL0_IMX35,
520 .num_events = 48,
521 .script_addrs = &sdma_script_imx53,
522};
523
524static struct sdma_script_start_addrs sdma_script_imx6q = {
525 .ap_2_ap_addr = 642,
526 .uart_2_mcu_addr = 817,
527 .mcu_2_app_addr = 747,
528 .per_2_per_addr = 6331,
529 .uartsh_2_mcu_addr = 1032,
530 .mcu_2_shp_addr = 960,
531 .app_2_mcu_addr = 683,
532 .shp_2_mcu_addr = 891,
533 .spdif_2_mcu_addr = 1100,
534 .mcu_2_spdif_addr = 1134,
535};
536
537static struct sdma_driver_data sdma_imx6q = {
538 .chnenbl0 = SDMA_CHNENBL0_IMX35,
539 .num_events = 48,
540 .script_addrs = &sdma_script_imx6q,
541};
542
543static struct sdma_script_start_addrs sdma_script_imx7d = {
544 .ap_2_ap_addr = 644,
545 .uart_2_mcu_addr = 819,
546 .mcu_2_app_addr = 749,
547 .uartsh_2_mcu_addr = 1034,
548 .mcu_2_shp_addr = 962,
549 .app_2_mcu_addr = 685,
550 .shp_2_mcu_addr = 893,
551 .spdif_2_mcu_addr = 1102,
552 .mcu_2_spdif_addr = 1136,
553};
554
555static struct sdma_driver_data sdma_imx7d = {
556 .chnenbl0 = SDMA_CHNENBL0_IMX35,
557 .num_events = 48,
558 .script_addrs = &sdma_script_imx7d,
559};
560
561static struct sdma_driver_data sdma_imx8mq = {
562 .chnenbl0 = SDMA_CHNENBL0_IMX35,
563 .num_events = 48,
564 .script_addrs = &sdma_script_imx7d,
565 .check_ratio = 1,
566};
567
568static const struct platform_device_id sdma_devtypes[] = {
569 {
570 .name = "imx25-sdma",
571 .driver_data = (unsigned long)&sdma_imx25,
572 }, {
573 .name = "imx31-sdma",
574 .driver_data = (unsigned long)&sdma_imx31,
575 }, {
576 .name = "imx35-sdma",
577 .driver_data = (unsigned long)&sdma_imx35,
578 }, {
579 .name = "imx51-sdma",
580 .driver_data = (unsigned long)&sdma_imx51,
581 }, {
582 .name = "imx53-sdma",
583 .driver_data = (unsigned long)&sdma_imx53,
584 }, {
585 .name = "imx6q-sdma",
586 .driver_data = (unsigned long)&sdma_imx6q,
587 }, {
588 .name = "imx7d-sdma",
589 .driver_data = (unsigned long)&sdma_imx7d,
590 }, {
591 .name = "imx8mq-sdma",
592 .driver_data = (unsigned long)&sdma_imx8mq,
593 }, {
594
595 }
596};
597MODULE_DEVICE_TABLE(platform, sdma_devtypes);
598
599static const struct of_device_id sdma_dt_ids[] = {
600 { .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
601 { .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
602 { .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
603 { .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
604 { .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
605 { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
606 { .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
607 { .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
608 { }
609};
610MODULE_DEVICE_TABLE(of, sdma_dt_ids);
611
612#define SDMA_H_CONFIG_DSPDMA BIT(12)
613#define SDMA_H_CONFIG_RTD_PINS BIT(11)
614#define SDMA_H_CONFIG_ACR BIT(4)
615#define SDMA_H_CONFIG_CSM (3)
616
617static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
618{
619 u32 chnenbl0 = sdma->drvdata->chnenbl0;
620 return chnenbl0 + event * 4;
621}
622
623static int sdma_config_ownership(struct sdma_channel *sdmac,
624 bool event_override, bool mcu_override, bool dsp_override)
625{
626 struct sdma_engine *sdma = sdmac->sdma;
627 int channel = sdmac->channel;
628 unsigned long evt, mcu, dsp;
629
630 if (event_override && mcu_override && dsp_override)
631 return -EINVAL;
632
633 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
634 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
635 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
636
637 if (dsp_override)
638 __clear_bit(channel, &dsp);
639 else
640 __set_bit(channel, &dsp);
641
642 if (event_override)
643 __clear_bit(channel, &evt);
644 else
645 __set_bit(channel, &evt);
646
647 if (mcu_override)
648 __clear_bit(channel, &mcu);
649 else
650 __set_bit(channel, &mcu);
651
652 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
653 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
654 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
655
656 return 0;
657}
658
659static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
660{
661 writel(BIT(channel), sdma->regs + SDMA_H_START);
662}
663
664
665
666
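/*
 * Start channel 0 (the control channel used to load scripts and contexts)
 * and poll until the transfer has finished.
 */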
667static int sdma_run_channel0(struct sdma_engine *sdma)
668{
669 int ret;
670 u32 reg;
671
672 sdma_enable_channel(sdma, 0);
673
674 ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
675 reg, !(reg & 1), 1, 500);
676 if (ret)
677 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
678
679
680 reg = readl(sdma->regs + SDMA_H_CONFIG);
681 if ((reg & SDMA_H_CONFIG_CSM) == 0) {
682 reg |= SDMA_H_CONFIG_CSM;
683 writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
684 }
685
686 return ret;
687}
688
689static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
690 u32 address)
691{
692 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
693 void *buf_virt;
694 dma_addr_t buf_phys;
695 int ret;
696 unsigned long flags;
697
698 buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
699 if (!buf_virt) {
700 return -ENOMEM;
701 }
702
703 spin_lock_irqsave(&sdma->channel_0_lock, flags);
704
705 bd0->mode.command = C0_SETPM;
706 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
707 bd0->mode.count = size / 2;
708 bd0->buffer_addr = buf_phys;
709 bd0->ext_buffer_addr = address;
710
711 memcpy(buf_virt, buf, size);
712
713 ret = sdma_run_channel0(sdma);
714
715 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
716
717 dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);
718
719 return ret;
720}
721
722static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
723{
724 struct sdma_engine *sdma = sdmac->sdma;
725 int channel = sdmac->channel;
726 unsigned long val;
727 u32 chnenbl = chnenbl_ofs(sdma, event);
728
729 val = readl_relaxed(sdma->regs + chnenbl);
730 __set_bit(channel, &val);
731 writel_relaxed(val, sdma->regs + chnenbl);
732}
733
734static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
735{
736 struct sdma_engine *sdma = sdmac->sdma;
737 int channel = sdmac->channel;
738 u32 chnenbl = chnenbl_ofs(sdma, event);
739 unsigned long val;
740
741 val = readl_relaxed(sdma->regs + chnenbl);
742 __clear_bit(channel, &val);
743 writel_relaxed(val, sdma->regs + chnenbl);
744}
745
746static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
747{
748 return container_of(t, struct sdma_desc, vd.tx);
749}
750
751static void sdma_start_desc(struct sdma_channel *sdmac)
752{
753 struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
754 struct sdma_desc *desc;
755 struct sdma_engine *sdma = sdmac->sdma;
756 int channel = sdmac->channel;
757
758 if (!vd) {
759 sdmac->desc = NULL;
760 return;
761 }
762 sdmac->desc = desc = to_sdma_desc(&vd->tx);
763
764
765
766
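	/*
	 * In cyclic mode the descriptor is kept on the issued list, otherwise
	 * it could never be freed by vchan_dma_desc_free_list().
	 */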
767 if (!(sdmac->flags & IMX_DMA_SG_LOOP))
768 list_del(&vd->node);
769
770 sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
771 sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
772 sdma_enable_channel(sdma, sdmac->channel);
773}
774
775static void sdma_update_channel_loop(struct sdma_channel *sdmac)
776{
777 struct sdma_buffer_descriptor *bd;
778 int error = 0;
779 enum dma_status old_status = sdmac->status;
780
781
782
783
784
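	/*
	 * Loop (cyclic) mode: walk the buffer descriptor ring, account and
	 * re-arm every completed descriptor and run the client callback once
	 * per period.
	 */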
785 while (sdmac->desc) {
786 struct sdma_desc *desc = sdmac->desc;
787
788 bd = &desc->bd[desc->buf_tail];
789
790 if (bd->mode.status & BD_DONE)
791 break;
792
793 if (bd->mode.status & BD_RROR) {
794 bd->mode.status &= ~BD_RROR;
795 sdmac->status = DMA_ERROR;
796 error = -EIO;
797 }
798
799
800
801
802
803
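		/*
		 * bd->mode.count holds the number of bytes actually
		 * transferred for this period; remember it for the residue
		 * calculation and re-arm the BD with the full period length.
		 */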
804 desc->chn_real_count = bd->mode.count;
805 bd->mode.status |= BD_DONE;
806 bd->mode.count = desc->period_len;
807 desc->buf_ptail = desc->buf_tail;
808 desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;
809
810
811
812
813
814
815
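		/*
		 * The callback is invoked directly from interrupt context
		 * (with the channel lock temporarily dropped) to keep latency
		 * low and to report the status before it can change again.
		 */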
816 spin_unlock(&sdmac->vc.lock);
817 dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
818 spin_lock(&sdmac->vc.lock);
819
820 if (error)
821 sdmac->status = old_status;
822 }
823}
824
static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
{
828 struct sdma_buffer_descriptor *bd;
829 int i, error = 0;
830
831 sdmac->desc->chn_real_count = 0;
832
833
834
835
836 for (i = 0; i < sdmac->desc->num_bd; i++) {
837 bd = &sdmac->desc->bd[i];
838
839 if (bd->mode.status & (BD_DONE | BD_RROR))
840 error = -EIO;
841 sdmac->desc->chn_real_count += bd->mode.count;
842 }
843
844 if (error)
845 sdmac->status = DMA_ERROR;
846 else
847 sdmac->status = DMA_COMPLETE;
848}
849
850static irqreturn_t sdma_int_handler(int irq, void *dev_id)
851{
852 struct sdma_engine *sdma = dev_id;
853 unsigned long stat;
854
855 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
856 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
857
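	/* Channel 0 is special and is not handled here, see sdma_run_channel0() */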
858 stat &= ~1;
859
860 while (stat) {
861 int channel = fls(stat) - 1;
862 struct sdma_channel *sdmac = &sdma->channel[channel];
863 struct sdma_desc *desc;
864
865 spin_lock(&sdmac->vc.lock);
866 desc = sdmac->desc;
867 if (desc) {
868 if (sdmac->flags & IMX_DMA_SG_LOOP) {
869 sdma_update_channel_loop(sdmac);
870 } else {
871 mxc_sdma_handle_channel_normal(sdmac);
872 vchan_cookie_complete(&desc->vd);
873 sdma_start_desc(sdmac);
874 }
875 }
876
877 spin_unlock(&sdmac->vc.lock);
878 __clear_bit(channel, &stat);
879 }
880
881 return IRQ_HANDLED;
882}
883
884
885
886
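/* Select the SDMA script (program counter) to use for the given peripheral type */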
887static void sdma_get_pc(struct sdma_channel *sdmac,
888 enum sdma_peripheral_type peripheral_type)
889{
890 struct sdma_engine *sdma = sdmac->sdma;
891 int per_2_emi = 0, emi_2_per = 0;
892
893
894
895
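	/*
	 * per_2_per and emi_2_emi are only used for peripheral-to-peripheral
	 * and memory-to-memory transfers.
	 */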
896 int per_2_per = 0, emi_2_emi = 0;
897
898 sdmac->pc_from_device = 0;
899 sdmac->pc_to_device = 0;
900 sdmac->device_to_device = 0;
901 sdmac->pc_to_pc = 0;
902
903 switch (peripheral_type) {
904 case IMX_DMATYPE_MEMORY:
905 emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
906 break;
907 case IMX_DMATYPE_DSP:
908 emi_2_per = sdma->script_addrs->bp_2_ap_addr;
909 per_2_emi = sdma->script_addrs->ap_2_bp_addr;
910 break;
911 case IMX_DMATYPE_FIRI:
912 per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
913 emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
914 break;
915 case IMX_DMATYPE_UART:
916 per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
917 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
918 break;
919 case IMX_DMATYPE_UART_SP:
920 per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
921 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
922 break;
923 case IMX_DMATYPE_ATA:
924 per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
925 emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
926 break;
927 case IMX_DMATYPE_CSPI:
928 case IMX_DMATYPE_EXT:
929 case IMX_DMATYPE_SSI:
930 case IMX_DMATYPE_SAI:
931 per_2_emi = sdma->script_addrs->app_2_mcu_addr;
932 emi_2_per = sdma->script_addrs->mcu_2_app_addr;
933 break;
934 case IMX_DMATYPE_SSI_DUAL:
935 per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
936 emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
937 break;
938 case IMX_DMATYPE_SSI_SP:
939 case IMX_DMATYPE_MMC:
940 case IMX_DMATYPE_SDHC:
941 case IMX_DMATYPE_CSPI_SP:
942 case IMX_DMATYPE_ESAI:
943 case IMX_DMATYPE_MSHC_SP:
944 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
945 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
946 break;
947 case IMX_DMATYPE_ASRC:
948 per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
949 emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
950 per_2_per = sdma->script_addrs->per_2_per_addr;
951 break;
952 case IMX_DMATYPE_ASRC_SP:
953 per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
954 emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
955 per_2_per = sdma->script_addrs->per_2_per_addr;
956 break;
957 case IMX_DMATYPE_MSHC:
958 per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
959 emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
960 break;
961 case IMX_DMATYPE_CCM:
962 per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
963 break;
964 case IMX_DMATYPE_SPDIF:
965 per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
966 emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
967 break;
968 case IMX_DMATYPE_IPU_MEMORY:
969 emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
970 break;
971 default:
972 break;
973 }
974
975 sdmac->pc_from_device = per_2_emi;
976 sdmac->pc_to_device = emi_2_per;
977 sdmac->device_to_device = per_2_per;
978 sdmac->pc_to_pc = emi_2_emi;
979}
980
981static int sdma_load_context(struct sdma_channel *sdmac)
982{
983 struct sdma_engine *sdma = sdmac->sdma;
984 int channel = sdmac->channel;
985 int load_address;
986 struct sdma_context_data *context = sdma->context;
987 struct sdma_buffer_descriptor *bd0 = sdma->bd0;
988 int ret;
989 unsigned long flags;
990
991 if (sdmac->context_loaded)
992 return 0;
993
994 if (sdmac->direction == DMA_DEV_TO_MEM)
995 load_address = sdmac->pc_from_device;
996 else if (sdmac->direction == DMA_DEV_TO_DEV)
997 load_address = sdmac->device_to_device;
998 else if (sdmac->direction == DMA_MEM_TO_MEM)
999 load_address = sdmac->pc_to_pc;
1000 else
1001 load_address = sdmac->pc_to_device;
1002
1003 if (load_address < 0)
1004 return load_address;
1005
1006 dev_dbg(sdma->dev, "load_address = %d\n", load_address);
1007 dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
1008 dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
1009 dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
1010 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
1011 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
1012
1013 spin_lock_irqsave(&sdma->channel_0_lock, flags);
1014
1015 memset(context, 0, sizeof(*context));
1016 context->channel_state.pc = load_address;
1017
1018
1019
1020
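	/*
	 * Hand the event masks, the peripheral base addresses and the
	 * watermark level to the script through the context's general
	 * registers.
	 */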
1021 context->gReg[0] = sdmac->event_mask[1];
1022 context->gReg[1] = sdmac->event_mask[0];
1023 context->gReg[2] = sdmac->per_addr;
1024 context->gReg[6] = sdmac->shp_addr;
1025 context->gReg[7] = sdmac->watermark_level;
1026
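	/*
	 * Load the context via channel 0: C0_SETDM copies sizeof(*context)/4
	 * words into SDMA internal RAM at 2048 + channel * context size.
	 */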
1027 bd0->mode.command = C0_SETDM;
1028 bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
1029 bd0->mode.count = sizeof(*context) / 4;
1030 bd0->buffer_addr = sdma->context_phys;
1031 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
1032 ret = sdma_run_channel0(sdma);
1033
1034 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
1035
1036 sdmac->context_loaded = true;
1037
1038 return ret;
1039}
1040
1041static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
1042{
1043 return container_of(chan, struct sdma_channel, vc.chan);
1044}
1045
1046static int sdma_disable_channel(struct dma_chan *chan)
1047{
1048 struct sdma_channel *sdmac = to_sdma_chan(chan);
1049 struct sdma_engine *sdma = sdmac->sdma;
1050 int channel = sdmac->channel;
1051
1052 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
1053 sdmac->status = DMA_ERROR;
1054
1055 return 0;
1056}
1057static void sdma_channel_terminate_work(struct work_struct *work)
1058{
1059 struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
1060 terminate_worker);
1061 unsigned long flags;
1062 LIST_HEAD(head);
1063
1064
1065
1066
1067
1068
1069
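	/*
	 * Give the SDMA core time to really stop (worst case roughly one
	 * buffer descriptor, about 1 ms) after the channel was disabled and
	 * before the descriptors are freed below.
	 */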
1070 usleep_range(1000, 2000);
1071
1072 spin_lock_irqsave(&sdmac->vc.lock, flags);
1073 vchan_get_all_descriptors(&sdmac->vc, &head);
1074 sdmac->desc = NULL;
1075 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1076 vchan_dma_desc_free_list(&sdmac->vc, &head);
1077 sdmac->context_loaded = false;
1078}
1079
1080static int sdma_disable_channel_async(struct dma_chan *chan)
1081{
1082 struct sdma_channel *sdmac = to_sdma_chan(chan);
1083
1084 sdma_disable_channel(chan);
1085
1086 if (sdmac->desc)
1087 schedule_work(&sdmac->terminate_worker);
1088
1089 return 0;
1090}
1091
1092static void sdma_channel_synchronize(struct dma_chan *chan)
1093{
1094 struct sdma_channel *sdmac = to_sdma_chan(chan);
1095
1096 vchan_synchronize(&sdmac->vc);
1097
1098 flush_work(&sdmac->terminate_worker);
1099}
1100
1101static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
1102{
1103 struct sdma_engine *sdma = sdmac->sdma;
1104
1105 int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
1106 int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;
1107
1108 set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
1109 set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);
1110
1111 if (sdmac->event_id0 > 31)
1112 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;
1113
1114 if (sdmac->event_id1 > 31)
1115 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;
1116
1117
1118
1119
1120
1121
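	/*
	 * If the lower watermark level (source burst) is larger than the
	 * higher watermark level (destination burst), swap the two levels
	 * and the corresponding event masks.
	 */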
1122 if (lwml > hwml) {
1123 sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
1124 SDMA_WATERMARK_LEVEL_HWML);
1125 sdmac->watermark_level |= hwml;
1126 sdmac->watermark_level |= lwml << 16;
1127 swap(sdmac->event_mask[0], sdmac->event_mask[1]);
1128 }
1129
1130 if (sdmac->per_address2 >= sdma->spba_start_addr &&
1131 sdmac->per_address2 <= sdma->spba_end_addr)
1132 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;
1133
1134 if (sdmac->per_address >= sdma->spba_start_addr &&
1135 sdmac->per_address <= sdma->spba_end_addr)
1136 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
1137
1138 sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
1139}
1140
1141static int sdma_config_channel(struct dma_chan *chan)
1142{
1143 struct sdma_channel *sdmac = to_sdma_chan(chan);
1144 int ret;
1145
1146 sdma_disable_channel(chan);
1147
1148 sdmac->event_mask[0] = 0;
1149 sdmac->event_mask[1] = 0;
1150 sdmac->shp_addr = 0;
1151 sdmac->per_addr = 0;
1152
1153 switch (sdmac->peripheral_type) {
1154 case IMX_DMATYPE_DSP:
1155 sdma_config_ownership(sdmac, false, true, true);
1156 break;
1157 case IMX_DMATYPE_MEMORY:
1158 sdma_config_ownership(sdmac, false, true, false);
1159 break;
1160 default:
1161 sdma_config_ownership(sdmac, true, true, false);
1162 break;
1163 }
1164
1165 sdma_get_pc(sdmac, sdmac->peripheral_type);
1166
1167 if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
1168 (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
1169
1170 if (sdmac->event_id1) {
1171 if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
1172 sdmac->peripheral_type == IMX_DMATYPE_ASRC)
1173 sdma_set_watermarklevel_for_p2p(sdmac);
1174 } else
1175 __set_bit(sdmac->event_id0, sdmac->event_mask);
1176
1177
1178 sdmac->shp_addr = sdmac->per_address;
1179 sdmac->per_addr = sdmac->per_address2;
1180 } else {
1181 sdmac->watermark_level = 0;
1182 }
1183
1184 ret = sdma_load_context(sdmac);
1185
1186 return ret;
1187}
1188
1189static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1190 unsigned int priority)
1191{
1192 struct sdma_engine *sdma = sdmac->sdma;
1193 int channel = sdmac->channel;
1194
1195 if (priority < MXC_SDMA_MIN_PRIORITY
1196 || priority > MXC_SDMA_MAX_PRIORITY) {
1197 return -EINVAL;
1198 }
1199
1200 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1201
1202 return 0;
1203}
1204
1205static int sdma_request_channel0(struct sdma_engine *sdma)
1206{
1207 int ret = -EBUSY;
1208
1209 sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
1210 GFP_NOWAIT);
1211 if (!sdma->bd0) {
1212 ret = -ENOMEM;
1213 goto out;
1214 }
1215
1216 sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
1217 sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;
1218
1219 sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
1220 return 0;
1221out:
1222
1223 return ret;
1224}
1225
1226
1227static int sdma_alloc_bd(struct sdma_desc *desc)
1228{
1229 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1230 int ret = 0;
1231
1232 desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
1233 &desc->bd_phys, GFP_NOWAIT);
1234 if (!desc->bd) {
1235 ret = -ENOMEM;
1236 goto out;
1237 }
1238out:
1239 return ret;
1240}
1241
1242static void sdma_free_bd(struct sdma_desc *desc)
1243{
1244 u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
1245
1246 dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
1247 desc->bd_phys);
1248}
1249
1250static void sdma_desc_free(struct virt_dma_desc *vd)
1251{
1252 struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);
1253
1254 sdma_free_bd(desc);
1255 kfree(desc);
1256}
1257
1258static int sdma_alloc_chan_resources(struct dma_chan *chan)
1259{
1260 struct sdma_channel *sdmac = to_sdma_chan(chan);
1261 struct imx_dma_data *data = chan->private;
1262 struct imx_dma_data mem_data;
1263 int prio, ret;
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
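	/*
	 * No channel-private data means a plain MEMCPY user (e.g. dmatest),
	 * so synthesize imx_dma_data for a memory-to-memory channel.
	 */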
1274 if (!data) {
1275 dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
1276 mem_data.priority = 2;
1277 mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
1278 mem_data.dma_request = 0;
1279 mem_data.dma_request2 = 0;
1280 data = &mem_data;
1281
1282 sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
1283 }
1284
1285 switch (data->priority) {
1286 case DMA_PRIO_HIGH:
1287 prio = 3;
1288 break;
1289 case DMA_PRIO_MEDIUM:
1290 prio = 2;
1291 break;
1292 case DMA_PRIO_LOW:
1293 default:
1294 prio = 1;
1295 break;
1296 }
1297
1298 sdmac->peripheral_type = data->peripheral_type;
1299 sdmac->event_id0 = data->dma_request;
1300 sdmac->event_id1 = data->dma_request2;
1301
1302 ret = clk_enable(sdmac->sdma->clk_ipg);
1303 if (ret)
1304 return ret;
1305 ret = clk_enable(sdmac->sdma->clk_ahb);
1306 if (ret)
1307 goto disable_clk_ipg;
1308
1309 ret = sdma_set_channel_priority(sdmac, prio);
1310 if (ret)
1311 goto disable_clk_ahb;
1312
1313 return 0;
1314
1315disable_clk_ahb:
1316 clk_disable(sdmac->sdma->clk_ahb);
1317disable_clk_ipg:
1318 clk_disable(sdmac->sdma->clk_ipg);
1319 return ret;
1320}
1321
1322static void sdma_free_chan_resources(struct dma_chan *chan)
1323{
1324 struct sdma_channel *sdmac = to_sdma_chan(chan);
1325 struct sdma_engine *sdma = sdmac->sdma;
1326
1327 sdma_disable_channel_async(chan);
1328
1329 sdma_channel_synchronize(chan);
1330
1331 if (sdmac->event_id0)
1332 sdma_event_disable(sdmac, sdmac->event_id0);
1333 if (sdmac->event_id1)
1334 sdma_event_disable(sdmac, sdmac->event_id1);
1335
1336 sdmac->event_id0 = 0;
1337 sdmac->event_id1 = 0;
1338
1339 sdma_set_channel_priority(sdmac, 0);
1340
1341 clk_disable(sdma->clk_ipg);
1342 clk_disable(sdma->clk_ahb);
1343}
1344
1345static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
1346 enum dma_transfer_direction direction, u32 bds)
1347{
1348 struct sdma_desc *desc;
1349
1350 desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
1351 if (!desc)
1352 goto err_out;
1353
1354 sdmac->status = DMA_IN_PROGRESS;
1355 sdmac->direction = direction;
1356 sdmac->flags = 0;
1357
1358 desc->chn_count = 0;
1359 desc->chn_real_count = 0;
1360 desc->buf_tail = 0;
1361 desc->buf_ptail = 0;
1362 desc->sdmac = sdmac;
1363 desc->num_bd = bds;
1364
1365 if (sdma_alloc_bd(desc))
1366 goto err_desc_out;
1367
1368
1369 if (direction == DMA_MEM_TO_MEM)
1370 sdma_config_ownership(sdmac, false, true, false);
1371
1372 if (sdma_load_context(sdmac))
1373 goto err_desc_out;
1374
1375 return desc;
1376
1377err_desc_out:
1378 kfree(desc);
1379err_out:
1380 return NULL;
1381}
1382
1383static struct dma_async_tx_descriptor *sdma_prep_memcpy(
1384 struct dma_chan *chan, dma_addr_t dma_dst,
1385 dma_addr_t dma_src, size_t len, unsigned long flags)
1386{
1387 struct sdma_channel *sdmac = to_sdma_chan(chan);
1388 struct sdma_engine *sdma = sdmac->sdma;
1389 int channel = sdmac->channel;
1390 size_t count;
1391 int i = 0, param;
1392 struct sdma_buffer_descriptor *bd;
1393 struct sdma_desc *desc;
1394
1395 if (!chan || !len)
1396 return NULL;
1397
1398 dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
1399 &dma_src, &dma_dst, len, channel);
1400
1401 desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
1402 len / SDMA_BD_MAX_CNT + 1);
1403 if (!desc)
1404 return NULL;
1405
1406 do {
1407 count = min_t(size_t, len, SDMA_BD_MAX_CNT);
1408 bd = &desc->bd[i];
1409 bd->buffer_addr = dma_src;
1410 bd->ext_buffer_addr = dma_dst;
1411 bd->mode.count = count;
1412 desc->chn_count += count;
1413 bd->mode.command = 0;
1414
1415 dma_src += count;
1416 dma_dst += count;
1417 len -= count;
1418 i++;
1419
1420 param = BD_DONE | BD_EXTD | BD_CONT;
1421
1422 if (!len) {
1423 param |= BD_INTR;
1424 param |= BD_LAST;
1425 param &= ~BD_CONT;
1426 }
1427
1428 dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
1429 i, count, bd->buffer_addr,
1430 param & BD_WRAP ? "wrap" : "",
1431 param & BD_INTR ? " intr" : "");
1432
1433 bd->mode.status = param;
1434 } while (len);
1435
1436 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1437}
1438
1439static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1440 struct dma_chan *chan, struct scatterlist *sgl,
1441 unsigned int sg_len, enum dma_transfer_direction direction,
1442 unsigned long flags, void *context)
1443{
1444 struct sdma_channel *sdmac = to_sdma_chan(chan);
1445 struct sdma_engine *sdma = sdmac->sdma;
1446 int i, count;
1447 int channel = sdmac->channel;
1448 struct scatterlist *sg;
1449 struct sdma_desc *desc;
1450
1451 sdma_config_write(chan, &sdmac->slave_config, direction);
1452
1453 desc = sdma_transfer_init(sdmac, direction, sg_len);
1454 if (!desc)
1455 goto err_out;
1456
1457 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1458 sg_len, channel);
1459
1460 for_each_sg(sgl, sg, sg_len, i) {
1461 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1462 int param;
1463
1464 bd->buffer_addr = sg->dma_address;
1465
1466 count = sg_dma_len(sg);
1467
1468 if (count > SDMA_BD_MAX_CNT) {
1469 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1470 channel, count, SDMA_BD_MAX_CNT);
1471 goto err_bd_out;
1472 }
1473
1474 bd->mode.count = count;
1475 desc->chn_count += count;
1476
1477 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1478 goto err_bd_out;
1479
1480 switch (sdmac->word_size) {
1481 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1482 bd->mode.command = 0;
1483 if (count & 3 || sg->dma_address & 3)
1484 goto err_bd_out;
1485 break;
1486 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1487 bd->mode.command = 2;
1488 if (count & 1 || sg->dma_address & 1)
1489 goto err_bd_out;
1490 break;
1491 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1492 bd->mode.command = 1;
1493 break;
1494 default:
1495 goto err_bd_out;
1496 }
1497
1498 param = BD_DONE | BD_EXTD | BD_CONT;
1499
1500 if (i + 1 == sg_len) {
1501 param |= BD_INTR;
1502 param |= BD_LAST;
1503 param &= ~BD_CONT;
1504 }
1505
1506 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1507 i, count, (u64)sg->dma_address,
1508 param & BD_WRAP ? "wrap" : "",
1509 param & BD_INTR ? " intr" : "");
1510
1511 bd->mode.status = param;
1512 }
1513
1514 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1515err_bd_out:
1516 sdma_free_bd(desc);
1517 kfree(desc);
1518err_out:
1519 sdmac->status = DMA_ERROR;
1520 return NULL;
1521}
1522
1523static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1524 struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
1525 size_t period_len, enum dma_transfer_direction direction,
1526 unsigned long flags)
1527{
1528 struct sdma_channel *sdmac = to_sdma_chan(chan);
1529 struct sdma_engine *sdma = sdmac->sdma;
1530 int num_periods = buf_len / period_len;
1531 int channel = sdmac->channel;
1532 int i = 0, buf = 0;
1533 struct sdma_desc *desc;
1534
1535 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
1536
1537 sdma_config_write(chan, &sdmac->slave_config, direction);
1538
1539 desc = sdma_transfer_init(sdmac, direction, num_periods);
1540 if (!desc)
1541 goto err_out;
1542
1543 desc->period_len = period_len;
1544
1545 sdmac->flags |= IMX_DMA_SG_LOOP;
1546
1547 if (period_len > SDMA_BD_MAX_CNT) {
1548 dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
1549 channel, period_len, SDMA_BD_MAX_CNT);
1550 goto err_bd_out;
1551 }
1552
1553 while (buf < buf_len) {
1554 struct sdma_buffer_descriptor *bd = &desc->bd[i];
1555 int param;
1556
1557 bd->buffer_addr = dma_addr;
1558
1559 bd->mode.count = period_len;
1560
1561 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
1562 goto err_bd_out;
1563 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
1564 bd->mode.command = 0;
1565 else
1566 bd->mode.command = sdmac->word_size;
1567
1568 param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
1569 if (i + 1 == num_periods)
1570 param |= BD_WRAP;
1571
1572 dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
1573 i, period_len, (u64)dma_addr,
1574 param & BD_WRAP ? "wrap" : "",
1575 param & BD_INTR ? " intr" : "");
1576
1577 bd->mode.status = param;
1578
1579 dma_addr += period_len;
1580 buf += period_len;
1581
1582 i++;
1583 }
1584
1585 return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
1586err_bd_out:
1587 sdma_free_bd(desc);
1588 kfree(desc);
1589err_out:
1590 sdmac->status = DMA_ERROR;
1591 return NULL;
1592}
1593
1594static int sdma_config_write(struct dma_chan *chan,
1595 struct dma_slave_config *dmaengine_cfg,
1596 enum dma_transfer_direction direction)
1597{
1598 struct sdma_channel *sdmac = to_sdma_chan(chan);
1599
1600 if (direction == DMA_DEV_TO_MEM) {
1601 sdmac->per_address = dmaengine_cfg->src_addr;
1602 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1603 dmaengine_cfg->src_addr_width;
1604 sdmac->word_size = dmaengine_cfg->src_addr_width;
1605 } else if (direction == DMA_DEV_TO_DEV) {
1606 sdmac->per_address2 = dmaengine_cfg->src_addr;
1607 sdmac->per_address = dmaengine_cfg->dst_addr;
1608 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1609 SDMA_WATERMARK_LEVEL_LWML;
1610 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1611 SDMA_WATERMARK_LEVEL_HWML;
1612 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1613 } else {
1614 sdmac->per_address = dmaengine_cfg->dst_addr;
1615 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1616 dmaengine_cfg->dst_addr_width;
1617 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1618 }
1619 sdmac->direction = direction;
1620 return sdma_config_channel(chan);
1621}
1622
1623static int sdma_config(struct dma_chan *chan,
1624 struct dma_slave_config *dmaengine_cfg)
1625{
1626 struct sdma_channel *sdmac = to_sdma_chan(chan);
1627
1628 memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
1629
1630
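	/*
	 * Enable the DMA request events now so they are already routed to
	 * the channel before it is started.
	 */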
1631 if (sdmac->event_id0) {
1632 if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
1633 return -EINVAL;
1634 sdma_event_enable(sdmac, sdmac->event_id0);
1635 }
1636
1637 if (sdmac->event_id1) {
1638 if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
1639 return -EINVAL;
1640 sdma_event_enable(sdmac, sdmac->event_id1);
1641 }
1642
1643 return 0;
1644}
1645
1646static enum dma_status sdma_tx_status(struct dma_chan *chan,
1647 dma_cookie_t cookie,
1648 struct dma_tx_state *txstate)
1649{
1650 struct sdma_channel *sdmac = to_sdma_chan(chan);
1651 struct sdma_desc *desc;
1652 u32 residue;
1653 struct virt_dma_desc *vd;
1654 enum dma_status ret;
1655 unsigned long flags;
1656
1657 ret = dma_cookie_status(chan, cookie, txstate);
1658 if (ret == DMA_COMPLETE || !txstate)
1659 return ret;
1660
1661 spin_lock_irqsave(&sdmac->vc.lock, flags);
1662 vd = vchan_find_desc(&sdmac->vc, cookie);
1663 if (vd) {
1664 desc = to_sdma_desc(&vd->tx);
1665 if (sdmac->flags & IMX_DMA_SG_LOOP)
1666 residue = (desc->num_bd - desc->buf_ptail) *
1667 desc->period_len - desc->chn_real_count;
1668 else
1669 residue = desc->chn_count - desc->chn_real_count;
1670 } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) {
1671 residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count;
1672 } else {
1673 residue = 0;
1674 }
1675 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1676
1677 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1678 residue);
1679
1680 return sdmac->status;
1681}
1682
1683static void sdma_issue_pending(struct dma_chan *chan)
1684{
1685 struct sdma_channel *sdmac = to_sdma_chan(chan);
1686 unsigned long flags;
1687
1688 spin_lock_irqsave(&sdmac->vc.lock, flags);
1689 if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
1690 sdma_start_desc(sdmac);
1691 spin_unlock_irqrestore(&sdmac->vc.lock, flags);
1692}
1693
1694#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
1695#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
1696#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
1697#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
1698
1699static void sdma_add_scripts(struct sdma_engine *sdma,
1700 const struct sdma_script_start_addrs *addr)
1701{
1702 s32 *addr_arr = (u32 *)addr;
1703 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1704 int i;
1705
1706
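	/* Default to the v1 address table size if the firmware has not set script_number yet */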
1707 if (!sdma->script_number)
1708 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1709
1710 for (i = 0; i < sdma->script_number; i++)
1711 if (addr_arr[i] > 0)
1712 saddr_arr[i] = addr_arr[i];
1713}
1714
1715static void sdma_load_firmware(const struct firmware *fw, void *context)
1716{
1717 struct sdma_engine *sdma = context;
1718 const struct sdma_firmware_header *header;
1719 const struct sdma_script_start_addrs *addr;
1720 unsigned short *ram_code;
1721
1722 if (!fw) {
1723 dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
1724
1725 return;
1726 }
1727
1728 if (fw->size < sizeof(*header))
1729 goto err_firmware;
1730
1731 header = (struct sdma_firmware_header *)fw->data;
1732
1733 if (header->magic != SDMA_FIRMWARE_MAGIC)
1734 goto err_firmware;
1735 if (header->ram_code_start + header->ram_code_size > fw->size)
1736 goto err_firmware;
1737 switch (header->version_major) {
1738 case 1:
1739 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1740 break;
1741 case 2:
1742 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
1743 break;
1744 case 3:
1745 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
1746 break;
1747 case 4:
1748 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
1749 break;
1750 default:
1751 dev_err(sdma->dev, "unknown firmware version\n");
1752 goto err_firmware;
1753 }
1754
1755 addr = (void *)header + header->script_addrs_start;
1756 ram_code = (void *)header + header->ram_code_start;
1757
1758 clk_enable(sdma->clk_ipg);
1759 clk_enable(sdma->clk_ahb);
1760
1761 sdma_load_script(sdma, ram_code,
1762 header->ram_code_size,
1763 addr->ram_code_start_addr);
1764 clk_disable(sdma->clk_ipg);
1765 clk_disable(sdma->clk_ahb);
1766
1767 sdma_add_scripts(sdma, addr);
1768
1769 dev_info(sdma->dev, "loaded firmware %d.%d\n",
1770 header->version_major,
1771 header->version_minor);
1772
1773err_firmware:
1774 release_firmware(fw);
1775}
1776
1777#define EVENT_REMAP_CELLS 3
1778
1779static int sdma_event_remap(struct sdma_engine *sdma)
1780{
1781 struct device_node *np = sdma->dev->of_node;
1782 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1783 struct property *event_remap;
1784 struct regmap *gpr;
1785 char propname[] = "fsl,sdma-event-remap";
1786 u32 reg, val, shift, num_map, i;
1787 int ret = 0;
1788
1789 if (IS_ERR(np) || IS_ERR(gpr_np))
1790 goto out;
1791
1792 event_remap = of_find_property(np, propname, NULL);
1793 num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1794 if (!num_map) {
1795 dev_dbg(sdma->dev, "no event needs to be remapped\n");
1796 goto out;
1797 } else if (num_map % EVENT_REMAP_CELLS) {
1798 dev_err(sdma->dev, "the property %s must modulo %d\n",
1799 propname, EVENT_REMAP_CELLS);
1800 ret = -EINVAL;
1801 goto out;
1802 }
1803
1804 gpr = syscon_node_to_regmap(gpr_np);
1805 if (IS_ERR(gpr)) {
1806 dev_err(sdma->dev, "failed to get gpr regmap\n");
1807 ret = PTR_ERR(gpr);
1808 goto out;
1809 }
1810
1811 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
		ret = of_property_read_u32_index(np, propname, i, &reg);
1813 if (ret) {
1814 dev_err(sdma->dev, "failed to read property %s index %d\n",
1815 propname, i);
1816 goto out;
1817 }
1818
1819 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1820 if (ret) {
1821 dev_err(sdma->dev, "failed to read property %s index %d\n",
1822 propname, i + 1);
1823 goto out;
1824 }
1825
1826 ret = of_property_read_u32_index(np, propname, i + 2, &val);
1827 if (ret) {
1828 dev_err(sdma->dev, "failed to read property %s index %d\n",
1829 propname, i + 2);
1830 goto out;
1831 }
1832
1833 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1834 }
1835
1836out:
1837 if (!IS_ERR(gpr_np))
1838 of_node_put(gpr_np);
1839
1840 return ret;
1841}
1842
1843static int sdma_get_firmware(struct sdma_engine *sdma,
1844 const char *fw_name)
1845{
1846 int ret;
1847
1848 ret = request_firmware_nowait(THIS_MODULE,
1849 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1850 GFP_KERNEL, sdma, sdma_load_firmware);
1851
1852 return ret;
1853}
1854
1855static int sdma_init(struct sdma_engine *sdma)
1856{
1857 int i, ret;
1858 dma_addr_t ccb_phys;
1859
1860 ret = clk_enable(sdma->clk_ipg);
1861 if (ret)
1862 return ret;
1863 ret = clk_enable(sdma->clk_ahb);
1864 if (ret)
1865 goto disable_clk_ipg;
1866
1867 if (sdma->drvdata->check_ratio &&
1868 (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
1869 sdma->clk_ratio = 1;
1870
1871
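	/* Be sure the SDMA has not started yet */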
1872 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1873
1874 sdma->channel_control = dma_alloc_coherent(sdma->dev,
1875 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1876 sizeof(struct sdma_context_data),
1877 &ccb_phys, GFP_KERNEL);
1878
1879 if (!sdma->channel_control) {
1880 ret = -ENOMEM;
1881 goto err_dma_alloc;
1882 }
1883
1884 sdma->context = (void *)sdma->channel_control +
1885 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1886 sdma->context_phys = ccb_phys +
1887 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1888
1889
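	/* Zero the channel control blocks that were just allocated */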
1890 memset(sdma->channel_control, 0,
1891 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1892
1893
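	/* disable all channels */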
1894 for (i = 0; i < sdma->drvdata->num_events; i++)
1895 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1896
1897
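	/* All channels have priority 0 */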
1898 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1899 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1900
1901 ret = sdma_request_channel0(sdma);
1902 if (ret)
1903 goto err_dma_alloc;
1904
1905 sdma_config_ownership(&sdma->channel[0], false, true, false);
1906
1907
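	/* Set Command Channel (Channel Zero) */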
1908 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1909
1910
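	/* Static context switching; set ACR when AHB and IPG run at the same rate */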
1911 if (sdma->clk_ratio)
1912 writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
1913 else
1914 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1915
1916 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1917
1918
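	/* Give channel 0 (the control channel) the highest priority */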
1919 sdma_set_channel_priority(&sdma->channel[0], 7);
1920
1921 clk_disable(sdma->clk_ipg);
1922 clk_disable(sdma->clk_ahb);
1923
1924 return 0;
1925
1926err_dma_alloc:
1927 clk_disable(sdma->clk_ahb);
1928disable_clk_ipg:
1929 clk_disable(sdma->clk_ipg);
1930 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1931 return ret;
1932}
1933
1934static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1935{
1936 struct sdma_channel *sdmac = to_sdma_chan(chan);
1937 struct imx_dma_data *data = fn_param;
1938
1939 if (!imx_dma_is_general_purpose(chan))
1940 return false;
1941
1942 sdmac->data = *data;
1943 chan->private = &sdmac->data;
1944
1945 return true;
1946}
1947
1948static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1949 struct of_dma *ofdma)
1950{
1951 struct sdma_engine *sdma = ofdma->of_dma_data;
1952 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1953 struct imx_dma_data data;
1954
1955 if (dma_spec->args_count != 3)
1956 return NULL;
1957
1958 data.dma_request = dma_spec->args[0];
1959 data.peripheral_type = dma_spec->args[1];
1960 data.priority = dma_spec->args[2];
1961
1962
1963
1964
1965
1966
1967
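	/*
	 * The DT cells carry only one request line, so dma_request2 (the
	 * second event used for p2p transfers) stays zero here.
	 */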
1968 data.dma_request2 = 0;
1969
1970 return __dma_request_channel(&mask, sdma_filter_fn, &data,
1971 ofdma->of_node);
1972}
1973
1974static int sdma_probe(struct platform_device *pdev)
1975{
1976 const struct of_device_id *of_id =
1977 of_match_device(sdma_dt_ids, &pdev->dev);
1978 struct device_node *np = pdev->dev.of_node;
1979 struct device_node *spba_bus;
1980 const char *fw_name;
1981 int ret;
1982 int irq;
1983 struct resource *iores;
1984 struct resource spba_res;
1985 struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1986 int i;
1987 struct sdma_engine *sdma;
1988 s32 *saddr_arr;
1989 const struct sdma_driver_data *drvdata = NULL;
1990
1991 if (of_id)
1992 drvdata = of_id->data;
1993 else if (pdev->id_entry)
1994 drvdata = (void *)pdev->id_entry->driver_data;
1995
1996 if (!drvdata) {
1997 dev_err(&pdev->dev, "unable to find driver data\n");
1998 return -EINVAL;
1999 }
2000
2001 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2002 if (ret)
2003 return ret;
2004
2005 sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2006 if (!sdma)
2007 return -ENOMEM;
2008
2009 spin_lock_init(&sdma->channel_0_lock);
2010
2011 sdma->dev = &pdev->dev;
2012 sdma->drvdata = drvdata;
2013
2014 irq = platform_get_irq(pdev, 0);
2015 if (irq < 0)
2016 return irq;
2017
2018 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2019 sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2020 if (IS_ERR(sdma->regs))
2021 return PTR_ERR(sdma->regs);
2022
2023 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2024 if (IS_ERR(sdma->clk_ipg))
2025 return PTR_ERR(sdma->clk_ipg);
2026
2027 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2028 if (IS_ERR(sdma->clk_ahb))
2029 return PTR_ERR(sdma->clk_ahb);
2030
2031 ret = clk_prepare(sdma->clk_ipg);
2032 if (ret)
2033 return ret;
2034
2035 ret = clk_prepare(sdma->clk_ahb);
2036 if (ret)
2037 goto err_clk;
2038
2039 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
2040 sdma);
2041 if (ret)
2042 goto err_irq;
2043
2044 sdma->irq = irq;
2045
2046 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2047 if (!sdma->script_addrs) {
2048 ret = -ENOMEM;
2049 goto err_irq;
2050 }
2051
2052
2053 saddr_arr = (s32 *)sdma->script_addrs;
2054 for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
2055 saddr_arr[i] = -EINVAL;
2056
2057 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2058 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2059 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2060
2061 INIT_LIST_HEAD(&sdma->dma_device.channels);
2062
2063 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2064 struct sdma_channel *sdmac = &sdma->channel[i];
2065
2066 sdmac->sdma = sdma;
2067
2068 sdmac->channel = i;
2069 sdmac->vc.desc_free = sdma_desc_free;
2070 INIT_WORK(&sdmac->terminate_worker,
2071 sdma_channel_terminate_work);
2072
2073
2074
2075
2076
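		/*
		 * Channel 0 is reserved for driver-internal use (script and
		 * context loading) and is therefore not registered with the
		 * dmaengine core.
		 */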
2077 if (i)
2078 vchan_init(&sdmac->vc, &sdma->dma_device);
2079 }
2080
2081 ret = sdma_init(sdma);
2082 if (ret)
2083 goto err_init;
2084
2085 ret = sdma_event_remap(sdma);
2086 if (ret)
2087 goto err_init;
2088
2089 if (sdma->drvdata->script_addrs)
2090 sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2091 if (pdata && pdata->script_addrs)
2092 sdma_add_scripts(sdma, pdata->script_addrs);
2093
2094 sdma->dma_device.dev = &pdev->dev;
2095
2096 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2097 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2098 sdma->dma_device.device_tx_status = sdma_tx_status;
2099 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2100 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2101 sdma->dma_device.device_config = sdma_config;
2102 sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
2103 sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2104 sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2105 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2106 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2107 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2108 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2109 sdma->dma_device.device_issue_pending = sdma_issue_pending;
2110 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
2111 sdma->dma_device.copy_align = 2;
2112 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
2113
2114 platform_set_drvdata(pdev, sdma);
2115
2116 ret = dma_async_device_register(&sdma->dma_device);
2117 if (ret) {
2118 dev_err(&pdev->dev, "unable to register\n");
2119 goto err_init;
2120 }
2121
2122 if (np) {
2123 ret = of_dma_controller_register(np, sdma_xlate, sdma);
2124 if (ret) {
2125 dev_err(&pdev->dev, "failed to register controller\n");
2126 goto err_register;
2127 }
2128
2129 spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2130 ret = of_address_to_resource(spba_bus, 0, &spba_res);
2131 if (!ret) {
2132 sdma->spba_start_addr = spba_res.start;
2133 sdma->spba_end_addr = spba_res.end;
2134 }
2135 of_node_put(spba_bus);
2136 }
2137
2138
2139
2140
2141
2142
2143
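	/*
	 * Load the firmware as the very last step; if this fails the driver
	 * still works with the scripts in the on-chip ROM.
	 */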
2144 if (pdata) {
2145 ret = sdma_get_firmware(sdma, pdata->fw_name);
2146 if (ret)
2147 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2148 } else {
2149
2150
2151
2152
2153
2154 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2155 &fw_name);
2156 if (ret) {
2157 dev_warn(&pdev->dev, "failed to get firmware name\n");
2158 } else {
2159 ret = sdma_get_firmware(sdma, fw_name);
2160 if (ret)
2161 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2162 }
2163 }
2164
2165 return 0;
2166
2167err_register:
2168 dma_async_device_unregister(&sdma->dma_device);
2169err_init:
2170 kfree(sdma->script_addrs);
2171err_irq:
2172 clk_unprepare(sdma->clk_ahb);
2173err_clk:
2174 clk_unprepare(sdma->clk_ipg);
2175 return ret;
2176}
2177
2178static int sdma_remove(struct platform_device *pdev)
2179{
2180 struct sdma_engine *sdma = platform_get_drvdata(pdev);
2181 int i;
2182
2183 devm_free_irq(&pdev->dev, sdma->irq, sdma);
2184 dma_async_device_unregister(&sdma->dma_device);
2185 kfree(sdma->script_addrs);
2186 clk_unprepare(sdma->clk_ahb);
2187 clk_unprepare(sdma->clk_ipg);
2188
2189 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2190 struct sdma_channel *sdmac = &sdma->channel[i];
2191
2192 tasklet_kill(&sdmac->vc.task);
2193 sdma_free_chan_resources(&sdmac->vc.chan);
2194 }
2195
2196 platform_set_drvdata(pdev, NULL);
2197 return 0;
2198}
2199
2200static struct platform_driver sdma_driver = {
2201 .driver = {
2202 .name = "imx-sdma",
2203 .of_match_table = sdma_dt_ids,
2204 },
2205 .id_table = sdma_devtypes,
2206 .remove = sdma_remove,
2207 .probe = sdma_probe,
2208};
2209
2210module_platform_driver(sdma_driver);
2211
2212MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2213MODULE_DESCRIPTION("i.MX SDMA driver");
2214#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2215MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2216#endif
2217#if IS_ENABLED(CONFIG_SOC_IMX7D)
2218MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2219#endif
2220MODULE_LICENSE("GPL");
2221