1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/init.h>
21#include <linux/iopoll.h>
22#include <linux/module.h>
23#include <linux/types.h>
24#include <linux/bitops.h>
25#include <linux/mm.h>
26#include <linux/interrupt.h>
27#include <linux/clk.h>
28#include <linux/delay.h>
29#include <linux/sched.h>
30#include <linux/semaphore.h>
31#include <linux/spinlock.h>
32#include <linux/device.h>
33#include <linux/dma-mapping.h>
34#include <linux/firmware.h>
35#include <linux/slab.h>
36#include <linux/platform_device.h>
37#include <linux/dmaengine.h>
38#include <linux/of.h>
39#include <linux/of_address.h>
40#include <linux/of_device.h>
41#include <linux/of_dma.h>
42
43#include <asm/irq.h>
44#include <linux/platform_data/dma-imx-sdma.h>
45#include <linux/platform_data/dma-imx.h>
46#include <linux/regmap.h>
47#include <linux/mfd/syscon.h>
48#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
49
50#include "dmaengine.h"
51
52
/* SDMA host (AP side) register offsets */
#define SDMA_H_C0PTR 0x000
#define SDMA_H_INTR 0x004
#define SDMA_H_STATSTOP 0x008
#define SDMA_H_START 0x00c
#define SDMA_H_EVTOVR 0x010
#define SDMA_H_DSPOVR 0x014
#define SDMA_H_HOSTOVR 0x018
#define SDMA_H_EVTPEND 0x01c
#define SDMA_H_DSPENBL 0x020
#define SDMA_H_RESET 0x024
#define SDMA_H_EVTERR 0x028
#define SDMA_H_INTRMSK 0x02c
#define SDMA_H_PSW 0x030
#define SDMA_H_EVTERRDBG 0x034
#define SDMA_H_CONFIG 0x038
#define SDMA_ONCE_ENB 0x040
#define SDMA_ONCE_DATA 0x044
#define SDMA_ONCE_INSTR 0x048
#define SDMA_ONCE_STAT 0x04c
#define SDMA_ONCE_CMD 0x050
#define SDMA_EVT_MIRROR 0x054
#define SDMA_ILLINSTADDR 0x058
#define SDMA_CHN0ADDR 0x05c
#define SDMA_ONCE_RTB 0x060
#define SDMA_XTRIG_CONF1 0x070
#define SDMA_XTRIG_CONF2 0x074
/* The channel-enable register bank moved between i.MX31 and i.MX35+ */
#define SDMA_CHNENBL0_IMX35 0x200
#define SDMA_CHNENBL0_IMX31 0x080
#define SDMA_CHNPRI_0 0x100




/*
 * Buffer descriptor status values.
 */
#define BD_DONE 0x01
#define BD_WRAP 0x02
#define BD_CONT 0x04
#define BD_INTR 0x08
#define BD_RROR 0x10
#define BD_LAST 0x20
#define BD_EXTD 0x80




/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME 0x80
#define DND_END_OF_XFER 0x40
#define DND_DONE 0x20
#define DND_UNUSED 0x01




/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME 0x40

#define IPCV2_MAX_NODES 50




/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR 0x10000000




/*
 * Buffer descriptor commands (used on control channel 0).
 * NOTE(review): several codes alias (C0_DUMP == C0_GETCTX == 0x03,
 * C0_ADDR == C0_SETDM == 0x01); this matches the reference manual
 * encoding, not a typo.
 */
#define C0_ADDR 0x01
#define C0_LOAD 0x02
#define C0_DUMP 0x03
#define C0_SETCTX 0x07
#define C0_GETCTX 0x03
#define C0_SETDM 0x01
#define C0_SETPM 0x04
#define C0_GETDM 0x02
#define C0_GETPM 0x08



/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS 0x80
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
/*
 * Layout of the watermark level word for peripheral-to-peripheral
 * transfers:
 *	bits 0-7	LWML	lower watermark level
 *	bit 8		PS	pad swallowing
 *	bit 9		PA	pad adding
 *	bit 10		SPDIF	SPDIF mode
 *	bit 11		SP	source is on SPBA
 *	bit 12		DP	destination is on SPBA
 *	bits 16-23	HWML	higher watermark level
 *	bit 28		LWE	lower watermark event (event_id0 > 31)
 *	bit 29		HWE	higher watermark event (event_id1 > 31)
 *	bit 31		CONT	transfer continues after watermark hit
 */
#define SDMA_WATERMARK_LEVEL_LWML 0xFF
#define SDMA_WATERMARK_LEVEL_PS BIT(8)
#define SDMA_WATERMARK_LEVEL_PA BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT BIT(31)
180
181
182
183
/*
 * Mode/count cell of a buffer descriptor. Hardware-defined layout -
 * do not reorder or change widths.
 */
struct sdma_mode_count {
	u32 count : 16; /* size of the buffer pointed by this BD */
	u32 status : 8; /* D, R, I, C, W, E status bits (BD_* flags) */
	u32 command : 8; /* command mostly used for channel 0 (C0_*) */
};

/*
 * Buffer descriptor as fetched from memory by the SDMA engine.
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count mode;
	u32 buffer_addr; /* address of the buffer described */
	u32 ext_buffer_addr; /* extended buffer address (valid with BD_EXTD) */
} __attribute__ ((packed));

/*
 * struct sdma_channel_control - Channel control block, one per channel.
 * The SDMA reads this to locate the channel's BD ring. Hardware layout.
 */
struct sdma_channel_control {
	u32 current_bd_ptr; /* current buffer descriptor processed */
	u32 base_bd_ptr; /* first element of buffer descriptor array */
	u32 unused[2];
} __attribute__ ((packed));
212
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * struct sdma_state_registers - SDMA core state registers as saved in
 * a channel context. Hardware-defined bitfield layout.
 */
struct sdma_state_registers {
	u32 pc     :14; /* program counter */
	u32 unused1: 1;
	u32 t      : 1; /* test bit: status of arithmetic & test instruction */
	u32 rpc    :14; /* return program counter */
	u32 unused0: 1;
	u32 sf     : 1; /* source fault while loading data */
	u32 spc    :14; /* loop start program counter */
	u32 unused2: 1;
	u32 df     : 1; /* destination fault while writing data */
	u32 epc    :14; /* loop end program counter */
	u32 lm     : 2; /* loop mode */
} __attribute__ ((packed));

/*
 * struct sdma_context_data - full context of one SDMA channel: state
 * registers, general registers and functional-unit registers. This is
 * what gets copied into SDMA RAM via a C0_SETDM command in
 * sdma_load_context(). Hardware layout, packed.
 */
struct sdma_context_data {
	struct sdma_state_registers channel_state;
	u32 gReg[8]; /* general registers r0..r7 */
	u32 mda; /* burst dma destination address register */
	u32 msa; /* burst dma source address register */
	u32 ms; /* burst dma status register */
	u32 md; /* burst dma data register */
	u32 pda; /* peripheral dma destination address register */
	u32 psa; /* peripheral dma source address register */
	u32 ps; /* peripheral dma status register */
	u32 pd; /* peripheral dma data register */
	u32 ca; /* CRC polynomial register */
	u32 cs; /* CRC accumulator register */
	u32 dda; /* dedicated core destination address register */
	u32 dsa; /* dedicated core source address register */
	u32 ds; /* dedicated core status register */
	u32 dd; /* dedicated core data register */
	u32 scratch0;
	u32 scratch1;
	u32 scratch2;
	u32 scratch3;
	u32 scratch4;
	u32 scratch5;
	u32 scratch6;
	u32 scratch7;
} __attribute__ ((packed));

/* number of buffer descriptors that fit in the per-channel page */
#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))
287
288struct sdma_engine;
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
/**
 * struct sdma_channel - per-channel driver state
 *
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number
 * @direction:		transfer type, needed to pick the SDMA script
 * @peripheral_type:	peripheral type, needed to pick the SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		second dma request line, for peripherals that use two
 * @word_size:		peripheral access size
 * @buf_tail:		ID of the buffer that was processed (cyclic mode)
 * @buf_ptail:		ID of the previously processed buffer (cyclic mode)
 * @num_bd:		number of BDs in use for the current transfer
 * @period_len:		period length of a cyclic transfer, in bytes
 * @bd:			buffer descriptor ring (one page, see NUM_BD)
 * @bd_phys:		DMA address of @bd
 * @pc_from_device:	script address for DEV_TO_MEM (set by sdma_get_pc())
 * @pc_to_device:	script address for MEM_TO_DEV
 * @device_to_device:	script address for DEV_TO_DEV
 * @flags:		IMX_DMA_SG_LOOP set for cyclic transfers
 * @per_address:	peripheral FIFO address (per_address2: p2p source)
 * @event_mask:		event masks written into the channel context
 * @watermark_level:	burst/watermark word for the channel context
 * @shp_addr:		value for gReg[6] (peripheral address) in the context
 * @per_addr:		value for gReg[2] (p2p peer address) in the context
 * @chan:		dmaengine channel embedded in this structure
 * @status:		dmaengine status of the current transfer
 * @chn_count:		total bytes requested for the current transfer
 * @chn_real_count:	bytes actually transferred (residue accounting)
 */
struct sdma_channel {
	struct sdma_engine *sdma;
	unsigned int channel;
	enum dma_transfer_direction direction;
	enum sdma_peripheral_type peripheral_type;
	unsigned int event_id0;
	unsigned int event_id1;
	enum dma_slave_buswidth word_size;
	unsigned int buf_tail;
	unsigned int buf_ptail;
	unsigned int num_bd;
	unsigned int period_len;
	struct sdma_buffer_descriptor *bd;
	dma_addr_t bd_phys;
	unsigned int pc_from_device, pc_to_device;
	unsigned int device_to_device;
	unsigned long flags;
	dma_addr_t per_address, per_address2;
	unsigned long event_mask[2];
	unsigned long watermark_level;
	u32 shp_addr, per_addr;
	struct dma_chan chan;
	spinlock_t lock;
	struct dma_async_tx_descriptor desc;
	enum dma_status status;
	unsigned int chn_count;
	unsigned int chn_real_count;
	struct tasklet_struct tasklet;
	struct imx_dma_data data;
};

#define IMX_DMA_SG_LOOP BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

/* firmware image magic: "SDMA" in little-endian */
#define SDMA_FIRMWARE_MAGIC 0x414d4453
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
/**
 * struct sdma_firmware_header - layout of the SDMA firmware image
 *
 * @magic:		"SDMA" (SDMA_FIRMWARE_MAGIC)
 * @version_major:	increased whenever the layout of struct
 *			sdma_script_start_addrs changes
 * @version_minor:	firmware minor version (for binary compatibility)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	number of script addresses in this image
 * @ram_code_start:	offset of the SDMA RAM image in this firmware image
 * @ram_code_size:	size of the SDMA RAM image
 */
struct sdma_firmware_header {
	u32 magic;
	u32 version_major;
	u32 version_minor;
	u32 script_addrs_start;
	u32 num_script_addrs;
	u32 ram_code_start;
	u32 ram_code_size;
};

/* per-SoC quirks: register layout and ROM script addresses */
struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs *script_addrs;
};

/* one instance per SDMA controller */
struct sdma_engine {
	struct device *dev;
	struct device_dma_parameters dma_parms;
	struct sdma_channel channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control *channel_control;
	void __iomem *regs;
	struct sdma_context_data *context; /* scratch context for channel 0 */
	dma_addr_t context_phys;
	struct dma_device dma_device;
	struct clk *clk_ipg;
	struct clk *clk_ahb;
	spinlock_t channel_0_lock; /* serialises control-channel commands */
	u32 script_number; /* number of scripts the loaded firmware knows */
	struct sdma_script_start_addrs *script_addrs;
	const struct sdma_driver_data *drvdata;
	u32 spba_start_addr; /* SPBA bus window, for p2p SP/DP detection */
	u32 spba_end_addr;
	unsigned int irq;
};
393
static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

/* ROM script start addresses for the i.MX25 (from the reference manual) */
static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

/* ROM script start addresses for the i.MX51 */
static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

/* ROM script start addresses for the i.MX53 */
static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

/* ROM script start addresses for the i.MX6Q */
static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

/* ROM script start addresses for the i.MX7D */
static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

/* platform-bus (non-DT) device id table */
static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

/* device-tree match table */
static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

/* SDMA_H_CONFIG bits */
#define SDMA_H_CONFIG_DSPDMA BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM (3)       /* indicates which context switch mode is selected*/
546
547static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
548{
549 u32 chnenbl0 = sdma->drvdata->chnenbl0;
550 return chnenbl0 + event * 4;
551}
552
553static int sdma_config_ownership(struct sdma_channel *sdmac,
554 bool event_override, bool mcu_override, bool dsp_override)
555{
556 struct sdma_engine *sdma = sdmac->sdma;
557 int channel = sdmac->channel;
558 unsigned long evt, mcu, dsp;
559
560 if (event_override && mcu_override && dsp_override)
561 return -EINVAL;
562
563 evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
564 mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
565 dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);
566
567 if (dsp_override)
568 __clear_bit(channel, &dsp);
569 else
570 __set_bit(channel, &dsp);
571
572 if (event_override)
573 __clear_bit(channel, &evt);
574 else
575 __set_bit(channel, &evt);
576
577 if (mcu_override)
578 __clear_bit(channel, &mcu);
579 else
580 __set_bit(channel, &mcu);
581
582 writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
583 writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
584 writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);
585
586 return 0;
587}
588
589static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
590{
591 writel(BIT(channel), sdma->regs + SDMA_H_START);
592}
593
594
595
596
/*
 * sdma_run_channel0 - run a command on the control channel (channel 0)
 * and busy-wait until the SDMA clears the channel's enable bit again.
 * Caller must hold sdma->channel_0_lock.
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}
615
616static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
617 u32 address)
618{
619 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
620 void *buf_virt;
621 dma_addr_t buf_phys;
622 int ret;
623 unsigned long flags;
624
625 buf_virt = dma_alloc_coherent(NULL,
626 size,
627 &buf_phys, GFP_KERNEL);
628 if (!buf_virt) {
629 return -ENOMEM;
630 }
631
632 spin_lock_irqsave(&sdma->channel_0_lock, flags);
633
634 bd0->mode.command = C0_SETPM;
635 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
636 bd0->mode.count = size / 2;
637 bd0->buffer_addr = buf_phys;
638 bd0->ext_buffer_addr = address;
639
640 memcpy(buf_virt, buf, size);
641
642 ret = sdma_run_channel0(sdma);
643
644 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
645
646 dma_free_coherent(NULL, size, buf_virt, buf_phys);
647
648 return ret;
649}
650
651static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
652{
653 struct sdma_engine *sdma = sdmac->sdma;
654 int channel = sdmac->channel;
655 unsigned long val;
656 u32 chnenbl = chnenbl_ofs(sdma, event);
657
658 val = readl_relaxed(sdma->regs + chnenbl);
659 __set_bit(channel, &val);
660 writel_relaxed(val, sdma->regs + chnenbl);
661}
662
663static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
664{
665 struct sdma_engine *sdma = sdmac->sdma;
666 int channel = sdmac->channel;
667 u32 chnenbl = chnenbl_ofs(sdma, event);
668 unsigned long val;
669
670 val = readl_relaxed(sdma->regs + chnenbl);
671 __clear_bit(channel, &val);
672 writel_relaxed(val, sdma->regs + chnenbl);
673}
674
/*
 * sdma_update_channel_loop - handle completion of cyclic (loop mode)
 * transfers from hard-irq context: consume every BD the SDMA has
 * finished, re-arm it, and invoke the client callback once per period.
 */
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		bd = &sdmac->bd[sdmac->buf_tail];

		/* BD_DONE still set means the SDMA has not finished it yet */
		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since
		 * the SDMA overwrites it with the number of bytes it
		 * actually transferred.  Restore the full period length
		 * and hand the BD back by setting BD_DONE again.
		 */
		sdmac->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = sdmac->period_len;
		sdmac->buf_ptail = sdmac->buf_tail;
		sdmac->buf_tail = (sdmac->buf_tail + 1) % sdmac->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);

		/*
		 * On error, restore the pre-error status once the client
		 * callback has run; DMA_ERROR is thus only visible during
		 * the callback while the loop keeps running.
		 */
		if (error)
			sdmac->status = old_status;
	}
}
721
722static void mxc_sdma_handle_channel_normal(unsigned long data)
723{
724 struct sdma_channel *sdmac = (struct sdma_channel *) data;
725 struct sdma_buffer_descriptor *bd;
726 int i, error = 0;
727
728 sdmac->chn_real_count = 0;
729
730
731
732
733 for (i = 0; i < sdmac->num_bd; i++) {
734 bd = &sdmac->bd[i];
735
736 if (bd->mode.status & (BD_DONE | BD_RROR))
737 error = -EIO;
738 sdmac->chn_real_count += bd->mode.count;
739 }
740
741 if (error)
742 sdmac->status = DMA_ERROR;
743 else
744 sdmac->status = DMA_COMPLETE;
745
746 dma_cookie_complete(&sdmac->desc);
747
748 dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
749}
750
/* Interrupt handler: one status bit per channel in SDMA_H_INTR. */
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	/* read and acknowledge all pending channel interrupts at once */
	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	/* service channels highest-numbered first (fls order) */
	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		/* cyclic channels complete in hardirq, others via tasklet */
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			sdma_update_channel_loop(sdmac);
		else
			tasklet_schedule(&sdmac->tasklet);

		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}
775
776
777
778
/*
 * sdma_get_pc - set the channel's script start addresses (program
 * counters) for the given peripheral type.
 *
 * Fills in pc_from_device (peripheral -> memory), pc_to_device
 * (memory -> peripheral) and device_to_device (p2p).  "emi" refers to
 * the external memory interface, i.e. system memory; "shp" scripts are
 * for peripherals on the shared (SPBA) bus.  An address of 0 means the
 * loaded firmware/ROM provides no script for that direction.
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		/* generic "application" peripheral scripts */
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		/* peripherals on the shared (SPBA) bus */
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
}
869
/*
 * sdma_load_context - build the channel context (script PC, event
 * masks, peripheral addresses, watermark) and write it into SDMA RAM
 * via a C0_SETDM command on the control channel.
 *
 * Returns a negative script address as the error if no script exists
 * for the configured direction.
 */
static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * Send by context the event mask,base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	/* C0_SETDM: copy the context into SDMA data memory at the
	 * channel's context slot (contexts start at word offset 2048). */
	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}
922
923static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
924{
925 return container_of(chan, struct sdma_channel, chan);
926}
927
928static int sdma_disable_channel(struct dma_chan *chan)
929{
930 struct sdma_channel *sdmac = to_sdma_chan(chan);
931 struct sdma_engine *sdma = sdmac->sdma;
932 int channel = sdmac->channel;
933
934 writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
935 sdmac->status = DMA_ERROR;
936
937 return 0;
938}
939
/*
 * Terminate-all callback: stop the channel, then wait long enough for
 * the SDMA core to actually quiesce before callers free descriptors.
 */
static int sdma_disable_channel_with_delay(struct dma_chan *chan)
{
	sdma_disable_channel(chan);

	/*
	 * According to NXP R&D team a delay of one BD SDMA cost time
	 * (maximum is 1ms) should be added after disable of the channel
	 * bit, to ensure SDMA core has really been stopped after SDMA
	 * interrupt register been disabled.
	 */
	mdelay(1);

	return 0;
}
954
/*
 * sdma_set_watermarklevel_for_p2p - assemble the watermark word for a
 * peripheral-to-peripheral transfer: LWML/HWML burst levels, LWE/HWE
 * event selectors, SP/DP SPBA-bus flags and CONT.
 */
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	/* lower/higher watermark = source/destination burst lengths */
	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	/* note: event_id0 lands in mask[1], event_id1 in mask[0] - the
	 * p2p script expects them that way (see gReg[0]/gReg[1] loading) */
	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	/* LWE/HWE select the upper event bank for events above 31 */
	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
	 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	/* flag each endpoint that lives in the shared (SPBA) bus window */
	if (sdmac->per_address2 >= sdma->spba_start_addr &&
			sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
			sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}
994
/*
 * sdma_config_channel - (re)program a channel: route its DMA request
 * events, set ownership, pick the script for the peripheral type and
 * load the resulting context into SDMA RAM.
 */
static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	/* the channel must be stopped while being reprogrammed */
	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	/* decide which masters (event/host/DSP) own this channel */
	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address of peripheral FIFO (and p2p peer) for context */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0; /* no watermark for memory/DSP */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}
1054
1055static int sdma_set_channel_priority(struct sdma_channel *sdmac,
1056 unsigned int priority)
1057{
1058 struct sdma_engine *sdma = sdmac->sdma;
1059 int channel = sdmac->channel;
1060
1061 if (priority < MXC_SDMA_MIN_PRIORITY
1062 || priority > MXC_SDMA_MAX_PRIORITY) {
1063 return -EINVAL;
1064 }
1065
1066 writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);
1067
1068 return 0;
1069}
1070
1071static int sdma_request_channel(struct sdma_channel *sdmac)
1072{
1073 struct sdma_engine *sdma = sdmac->sdma;
1074 int channel = sdmac->channel;
1075 int ret = -EBUSY;
1076
1077 sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
1078 GFP_KERNEL);
1079 if (!sdmac->bd) {
1080 ret = -ENOMEM;
1081 goto out;
1082 }
1083
1084 sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
1085 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1086
1087 sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
1088 return 0;
1089out:
1090
1091 return ret;
1092}
1093
1094static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
1095{
1096 unsigned long flags;
1097 struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
1098 dma_cookie_t cookie;
1099
1100 spin_lock_irqsave(&sdmac->lock, flags);
1101
1102 cookie = dma_cookie_assign(tx);
1103
1104 spin_unlock_irqrestore(&sdmac->lock, flags);
1105
1106 return cookie;
1107}
1108
/*
 * sdma_alloc_chan_resources - dmaengine callback: take channel config
 * from chan->private (legacy imx_dma_data), enable clocks, allocate
 * the BD ring and initialise the channel's descriptor.
 */
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	/* map the generic DMA priority onto SDMA priorities 1..3 */
	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	/* clocks stay enabled until sdma_free_chan_resources() */
	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_request_channel(sdmac);
	if (ret)
		goto disable_clk_ahb;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	dma_async_tx_descriptor_init(&sdmac->desc, chan);
	sdmac->desc.tx_submit = sdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	sdmac->desc.flags = DMA_CTRL_ACK;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}
1163
1164static void sdma_free_chan_resources(struct dma_chan *chan)
1165{
1166 struct sdma_channel *sdmac = to_sdma_chan(chan);
1167 struct sdma_engine *sdma = sdmac->sdma;
1168
1169 sdma_disable_channel(chan);
1170
1171 if (sdmac->event_id0)
1172 sdma_event_disable(sdmac, sdmac->event_id0);
1173 if (sdmac->event_id1)
1174 sdma_event_disable(sdmac, sdmac->event_id1);
1175
1176 sdmac->event_id0 = 0;
1177 sdmac->event_id1 = 0;
1178
1179 sdma_set_channel_priority(sdmac, 0);
1180
1181 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
1182
1183 clk_disable(sdma->clk_ipg);
1184 clk_disable(sdma->clk_ahb);
1185}
1186
1187static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
1188 struct dma_chan *chan, struct scatterlist *sgl,
1189 unsigned int sg_len, enum dma_transfer_direction direction,
1190 unsigned long flags, void *context)
1191{
1192 struct sdma_channel *sdmac = to_sdma_chan(chan);
1193 struct sdma_engine *sdma = sdmac->sdma;
1194 int ret, i, count;
1195 int channel = sdmac->channel;
1196 struct scatterlist *sg;
1197
1198 if (sdmac->status == DMA_IN_PROGRESS)
1199 return NULL;
1200 sdmac->status = DMA_IN_PROGRESS;
1201
1202 sdmac->flags = 0;
1203
1204 sdmac->buf_tail = 0;
1205 sdmac->buf_ptail = 0;
1206 sdmac->chn_real_count = 0;
1207
1208 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
1209 sg_len, channel);
1210
1211 sdmac->direction = direction;
1212 ret = sdma_load_context(sdmac);
1213 if (ret)
1214 goto err_out;
1215
1216 if (sg_len > NUM_BD) {
1217 dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
1218 channel, sg_len, NUM_BD);
1219 ret = -EINVAL;
1220 goto err_out;
1221 }
1222
1223 sdmac->chn_count = 0;
1224 for_each_sg(sgl, sg, sg_len, i) {
1225 struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
1226 int param;
1227
1228 bd->buffer_addr = sg->dma_address;
1229
1230 count = sg_dma_len(sg);
1231
1232 if (count > 0xffff) {
1233 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
1234 channel, count, 0xffff);
1235 ret = -EINVAL;
1236 goto err_out;
1237 }
1238
1239 bd->mode.count = count;
1240 sdmac->chn_count += count;
1241
1242 if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
1243 ret = -EINVAL;
1244 goto err_out;
1245 }
1246
1247 switch (sdmac->word_size) {
1248 case DMA_SLAVE_BUSWIDTH_4_BYTES:
1249 bd->mode.command = 0;
1250 if (count & 3 || sg->dma_address & 3)
1251 return NULL;
1252 break;
1253 case DMA_SLAVE_BUSWIDTH_2_BYTES:
1254 bd->mode.command = 2;
1255 if (count & 1 || sg->dma_address & 1)
1256 return NULL;
1257 break;
1258 case DMA_SLAVE_BUSWIDTH_1_BYTE:
1259 bd->mode.command = 1;
1260 break;
1261 default:
1262 return NULL;
1263 }
1264
1265 param = BD_DONE | BD_EXTD | BD_CONT;
1266
1267 if (i + 1 == sg_len) {
1268 param |= BD_INTR;
1269 param |= BD_LAST;
1270 param &= ~BD_CONT;
1271 }
1272
1273 dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
1274 i, count, (u64)sg->dma_address,
1275 param & BD_WRAP ? "wrap" : "",
1276 param & BD_INTR ? " intr" : "");
1277
1278 bd->mode.status = param;
1279 }
1280
1281 sdmac->num_bd = sg_len;
1282 sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;
1283
1284 return &sdmac->desc;
1285err_out:
1286 sdmac->status = DMA_ERROR;
1287 return NULL;
1288}
1289
/*
 * sdma_prep_dma_cyclic - dmaengine callback: build a wrapping BD ring
 * with one BD per period for cyclic (e.g. audio) transfers.
 */
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	/* only one transfer may be in flight per channel */
	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	sdmac->buf_tail = 0;
	sdmac->buf_ptail = 0;
	sdmac->chn_real_count = 0;
	sdmac->period_len = period_len;

	/* mark the channel cyclic: completions handled in hardirq */
	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_out;
	}

	/* the BD count field is only 16 bits wide */
	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_out;
		/* command encodes the access size; 0 means 32-bit */
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		/* every period interrupts; the last BD wraps to the first */
		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	sdmac->num_bd = num_periods;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
1371
1372static int sdma_config(struct dma_chan *chan,
1373 struct dma_slave_config *dmaengine_cfg)
1374{
1375 struct sdma_channel *sdmac = to_sdma_chan(chan);
1376
1377 if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
1378 sdmac->per_address = dmaengine_cfg->src_addr;
1379 sdmac->watermark_level = dmaengine_cfg->src_maxburst *
1380 dmaengine_cfg->src_addr_width;
1381 sdmac->word_size = dmaengine_cfg->src_addr_width;
1382 } else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
1383 sdmac->per_address2 = dmaengine_cfg->src_addr;
1384 sdmac->per_address = dmaengine_cfg->dst_addr;
1385 sdmac->watermark_level = dmaengine_cfg->src_maxburst &
1386 SDMA_WATERMARK_LEVEL_LWML;
1387 sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
1388 SDMA_WATERMARK_LEVEL_HWML;
1389 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1390 } else {
1391 sdmac->per_address = dmaengine_cfg->dst_addr;
1392 sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
1393 dmaengine_cfg->dst_addr_width;
1394 sdmac->word_size = dmaengine_cfg->dst_addr_width;
1395 }
1396 sdmac->direction = dmaengine_cfg->direction;
1397 return sdma_config_channel(chan);
1398}
1399
1400static enum dma_status sdma_tx_status(struct dma_chan *chan,
1401 dma_cookie_t cookie,
1402 struct dma_tx_state *txstate)
1403{
1404 struct sdma_channel *sdmac = to_sdma_chan(chan);
1405 u32 residue;
1406
1407 if (sdmac->flags & IMX_DMA_SG_LOOP)
1408 residue = (sdmac->num_bd - sdmac->buf_ptail) *
1409 sdmac->period_len - sdmac->chn_real_count;
1410 else
1411 residue = sdmac->chn_count - sdmac->chn_real_count;
1412
1413 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1414 residue);
1415
1416 return sdmac->status;
1417}
1418
1419static void sdma_issue_pending(struct dma_chan *chan)
1420{
1421 struct sdma_channel *sdmac = to_sdma_chan(chan);
1422 struct sdma_engine *sdma = sdmac->sdma;
1423
1424 if (sdmac->status == DMA_IN_PROGRESS)
1425 sdma_enable_channel(sdma, sdmac->channel);
1426}
1427
/*
 * Number of s32 script-address slots defined by each major firmware
 * version; selects how many entries sdma_add_scripts() copies.
 */
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 41
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 42
1432
1433static void sdma_add_scripts(struct sdma_engine *sdma,
1434 const struct sdma_script_start_addrs *addr)
1435{
1436 s32 *addr_arr = (u32 *)addr;
1437 s32 *saddr_arr = (u32 *)sdma->script_addrs;
1438 int i;
1439
1440
1441 if (!sdma->script_number)
1442 sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
1443
1444 for (i = 0; i < sdma->script_number; i++)
1445 if (addr_arr[i] > 0)
1446 saddr_arr[i] = addr_arr[i];
1447}
1448
/*
 * Completion callback for request_firmware_nowait(): validate the SDMA
 * firmware image, upload its RAM code to the engine and record the
 * script entry addresses. When no external firmware is found the
 * engine keeps running on the ROM scripts.
 */
static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* Not fatal: ROM scripts cover the basic transfer types. */
		return;
	}

	/* Reject images too small to even contain the header. */
	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	/* RAM code region must lie entirely inside the firmware blob. */
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	/* The major version determines how many script slots exist. */
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	/*
	 * NOTE(review): script_addrs_start is not bounds-checked against
	 * fw->size the way ram_code_start is — confirm upstream status.
	 */
	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	/* Clocks must run while channel 0 copies the code into SDMA RAM. */
	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			header->ram_code_size,
			addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
			header->version_major,
			header->version_minor);

err_firmware:
	release_firmware(fw);
}
1510
1511#define EVENT_REMAP_CELLS 3
1512
1513static int sdma_event_remap(struct sdma_engine *sdma)
1514{
1515 struct device_node *np = sdma->dev->of_node;
1516 struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
1517 struct property *event_remap;
1518 struct regmap *gpr;
1519 char propname[] = "fsl,sdma-event-remap";
1520 u32 reg, val, shift, num_map, i;
1521 int ret = 0;
1522
1523 if (IS_ERR(np) || IS_ERR(gpr_np))
1524 goto out;
1525
1526 event_remap = of_find_property(np, propname, NULL);
1527 num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
1528 if (!num_map) {
1529 dev_dbg(sdma->dev, "no event needs to be remapped\n");
1530 goto out;
1531 } else if (num_map % EVENT_REMAP_CELLS) {
1532 dev_err(sdma->dev, "the property %s must modulo %d\n",
1533 propname, EVENT_REMAP_CELLS);
1534 ret = -EINVAL;
1535 goto out;
1536 }
1537
1538 gpr = syscon_node_to_regmap(gpr_np);
1539 if (IS_ERR(gpr)) {
1540 dev_err(sdma->dev, "failed to get gpr regmap\n");
1541 ret = PTR_ERR(gpr);
1542 goto out;
1543 }
1544
1545 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1546 ret = of_property_read_u32_index(np, propname, i, ®);
1547 if (ret) {
1548 dev_err(sdma->dev, "failed to read property %s index %d\n",
1549 propname, i);
1550 goto out;
1551 }
1552
1553 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1554 if (ret) {
1555 dev_err(sdma->dev, "failed to read property %s index %d\n",
1556 propname, i + 1);
1557 goto out;
1558 }
1559
1560 ret = of_property_read_u32_index(np, propname, i + 2, &val);
1561 if (ret) {
1562 dev_err(sdma->dev, "failed to read property %s index %d\n",
1563 propname, i + 2);
1564 goto out;
1565 }
1566
1567 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1568 }
1569
1570out:
1571 if (!IS_ERR(gpr_np))
1572 of_node_put(gpr_np);
1573
1574 return ret;
1575}
1576
1577static int sdma_get_firmware(struct sdma_engine *sdma,
1578 const char *fw_name)
1579{
1580 int ret;
1581
1582 ret = request_firmware_nowait(THIS_MODULE,
1583 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1584 GFP_KERNEL, sdma, sdma_load_firmware);
1585
1586 return ret;
1587}
1588
1589static int sdma_init(struct sdma_engine *sdma)
1590{
1591 int i, ret;
1592 dma_addr_t ccb_phys;
1593
1594 ret = clk_enable(sdma->clk_ipg);
1595 if (ret)
1596 return ret;
1597 ret = clk_enable(sdma->clk_ahb);
1598 if (ret)
1599 goto disable_clk_ipg;
1600
1601
1602 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1603
1604 sdma->channel_control = dma_alloc_coherent(NULL,
1605 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1606 sizeof(struct sdma_context_data),
1607 &ccb_phys, GFP_KERNEL);
1608
1609 if (!sdma->channel_control) {
1610 ret = -ENOMEM;
1611 goto err_dma_alloc;
1612 }
1613
1614 sdma->context = (void *)sdma->channel_control +
1615 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1616 sdma->context_phys = ccb_phys +
1617 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1618
1619
1620 memset(sdma->channel_control, 0,
1621 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control));
1622
1623
1624 for (i = 0; i < sdma->drvdata->num_events; i++)
1625 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1626
1627
1628 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1629 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1630
1631 ret = sdma_request_channel(&sdma->channel[0]);
1632 if (ret)
1633 goto err_dma_alloc;
1634
1635 sdma_config_ownership(&sdma->channel[0], false, true, false);
1636
1637
1638 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1639
1640
1641
1642 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1643
1644 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1645
1646
1647 sdma_set_channel_priority(&sdma->channel[0], 7);
1648
1649 clk_disable(sdma->clk_ipg);
1650 clk_disable(sdma->clk_ahb);
1651
1652 return 0;
1653
1654err_dma_alloc:
1655 clk_disable(sdma->clk_ahb);
1656disable_clk_ipg:
1657 clk_disable(sdma->clk_ipg);
1658 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1659 return ret;
1660}
1661
1662static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1663{
1664 struct sdma_channel *sdmac = to_sdma_chan(chan);
1665 struct imx_dma_data *data = fn_param;
1666
1667 if (!imx_dma_is_general_purpose(chan))
1668 return false;
1669
1670 sdmac->data = *data;
1671 chan->private = &sdmac->data;
1672
1673 return true;
1674}
1675
1676static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1677 struct of_dma *ofdma)
1678{
1679 struct sdma_engine *sdma = ofdma->of_dma_data;
1680 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1681 struct imx_dma_data data;
1682
1683 if (dma_spec->args_count != 3)
1684 return NULL;
1685
1686 data.dma_request = dma_spec->args[0];
1687 data.peripheral_type = dma_spec->args[1];
1688 data.priority = dma_spec->args[2];
1689
1690
1691
1692
1693
1694
1695
1696 data.dma_request2 = 0;
1697
1698 return dma_request_channel(mask, sdma_filter_fn, &data);
1699}
1700
/*
 * Probe: map registers, acquire clocks and IRQ, initialise all channel
 * structures, bring up the engine, load scripts/firmware and register
 * with the dmaengine core (and, on DT systems, the OF DMA framework).
 */
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	/* Driver data comes from the DT match or the platform id table. */
	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	ret = clk_prepare(sdma->clk_ipg);
	if (ret)
		return ret;

	ret = clk_prepare(sdma->clk_ahb);
	if (ret)
		goto err_clk;

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		goto err_irq;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs) {
		ret = -ENOMEM;
		goto err_irq;
	}

	/* initially no scripts available: seed every slot with -EINVAL */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		dma_cookie_init(&sdmac->chan);
		sdmac->channel = i;

		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
			     (unsigned long) sdmac);
		/*
		 * Channel 0 is the command channel reserved for firmware
		 * loading and context switching; keep it off the public
		 * channel list.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	/* Built-in ROM scripts first, then any platform-data overrides. */
	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * No platform data: look up the firmware name in the
		 * device tree. Failure is only a warning — the engine
		 * still works with the ROM scripts.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	/* BD count is 16 bits, so limit segments to 64K-1 bytes. */
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		/* Record the SPBA bus window, if present, for peripherals. */
		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
err_irq:
	clk_unprepare(sdma->clk_ahb);
err_clk:
	clk_unprepare(sdma->clk_ipg);
	return ret;
}
1898
1899static int sdma_remove(struct platform_device *pdev)
1900{
1901 struct sdma_engine *sdma = platform_get_drvdata(pdev);
1902 int i;
1903
1904 devm_free_irq(&pdev->dev, sdma->irq, sdma);
1905 dma_async_device_unregister(&sdma->dma_device);
1906 kfree(sdma->script_addrs);
1907 clk_unprepare(sdma->clk_ahb);
1908 clk_unprepare(sdma->clk_ipg);
1909
1910 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
1911 struct sdma_channel *sdmac = &sdma->channel[i];
1912
1913 tasklet_kill(&sdmac->tasklet);
1914 }
1915
1916 platform_set_drvdata(pdev, NULL);
1917 return 0;
1918}
1919
/* Platform driver glue: matches both DT and legacy platform devices. */
static struct platform_driver sdma_driver = {
	.driver = {
		.name = "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table = sdma_devtypes,
	.remove = sdma_remove,
	.probe = sdma_probe,
};

module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
MODULE_LICENSE("GPL");
1935