/*
 * drivers/dma/imx-sdma.c
 *
 * This file contains a driver for the Freescale Smart DMA engine.
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * Based on code from Freescale.
 *
 * The code contained herein is licensed under the GNU General Public
 * License (GPL).
 */
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
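
/* SDMA host-side register offsets */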
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100
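
/*
 * Buffer descriptor status values.
 */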
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80
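
/*
 * Data Node descriptor status values.
 */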
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01
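
/*
 * IPCV2 descriptor status values.
 */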
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
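
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error.
 */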
#define DATA_ERROR  0x10000000
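
/*
 * Buffer descriptor commands.
 */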
#define C0_ADDR    0x01
#define C0_LOAD    0x02
#define C0_DUMP    0x03
#define C0_SETCTX  0x07
#define C0_GETCTX  0x03
#define C0_SETDM   0x01
#define C0_SETPM   0x04
#define C0_GETDM   0x02
#define C0_GETPM   0x08
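
/*
 * Change endianness indicator in the BD command field.
 */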
#define CHANGE_ENDIANNESS  0x80
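
/*
 * p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long
 *						as both events are detected
 *						and script must be manually
 *						stopped by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */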
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
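
/*
 * Mode/Count of data node descriptors - IPCv2.
 */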
struct sdma_mode_count {
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};
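
/*
 * Buffer descriptor.
 */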
struct sdma_buffer_descriptor {
	struct sdma_mode_count  mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));
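
/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of the buffer descriptor array
 * @unused:		padding
 */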
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));
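
/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */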
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));
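
/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0-7:		scratch RAM
 */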
struct sdma_context_data {
	struct sdma_state_registers  channel_state;
	u32  gReg[8];
	u32  mda;
	u32  msa;
	u32  ms;
	u32  md;
	u32  pda;
	u32  psa;
	u32  ps;
	u32  pd;
	u32  ca;
	u32  cs;
	u32  dda;
	u32  dsa;
	u32  ds;
	u32  dd;
	u32  scratch0;
	u32  scratch1;
	u32  scratch2;
	u32  scratch3;
	u32  scratch4;
	u32  scratch5;
	u32  scratch6;
	u32  scratch7;
} __attribute__ ((packed));

#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

struct sdma_engine;
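
/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number
 * @direction:		transfer type, needed for setting the SDMA script
 * @peripheral_type:	peripheral type, needed for setting the SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @buf_tail:		ID of the buffer that was processed
 * @num_bd:		number of descriptors currently handled
 */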
struct sdma_channel {
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			buf_tail;
	unsigned int			num_bd;
	unsigned int			period_len;
	struct sdma_buffer_descriptor	*bd;
	dma_addr_t			bd_phys;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	unsigned int			chn_count;
	unsigned int			chn_real_count;
	struct tasklet_struct		tasklet;
	struct imx_dma_data		data;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453	/* "SDMA" in little-endian ASCII */
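
/**
 * struct sdma_firmware_header - layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever the layout of
 *			struct sdma_script_start_addrs changes
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	number of script addresses in this image
 * @ram_code_start:	offset of the SDMA RAM image in this image
 * @ram_code_size:	size of the SDMA RAM image
 */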
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
};
static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};
static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSP DMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected */

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;

	return chnenbl0 + event * 4;
}
static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}
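
/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */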
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret;
}
static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt)
		return -ENOMEM;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	/* SDMA program memory is 16 bits wide, so count is in halfwords */
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);

	return ret;
}
static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}
static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		bd = &sdmac->bd[sdmac->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */
		sdmac->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = sdmac->period_len;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);

		sdmac->buf_tail++;
		sdmac->buf_tail %= sdmac->num_bd;

		if (error)
			sdmac->status = old_status;
	}
}
static void mxc_sdma_handle_channel_normal(unsigned long data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->num_bd; i++) {
		bd = &sdmac->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;

	dma_cookie_complete(&sdmac->desc);

	dmaengine_desc_get_callback_invoke(&sdmac->desc, NULL);
}
static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		if (sdmac->flags & IMX_DMA_SG_LOOP)
			sdma_update_channel_loop(sdmac);
		else
			tasklet_schedule(&sdmac->tasklet);

		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}
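
/*
 * sets the pc of SDMA script according to the peripheral type
 */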
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
}
static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	/* channel contexts start at word 2048 of SDMA internal RAM */
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}
static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}
static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
						SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
	    sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
	    sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}
static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
			(sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0;
	}

	ret = sdma_load_context(sdmac);

	return ret;
}
static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}
static int sdma_request_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	sdmac->bd = dma_zalloc_coherent(sdma->dev, PAGE_SIZE, &sdmac->bd_phys,
					GFP_KERNEL);
	if (!sdmac->bd)
		return -ENOMEM;

	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);

	return 0;
}
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irqsave(&sdmac->lock, flags);

	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&sdmac->lock, flags);

	return cookie;
}
static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_request_channel(sdmac);
	if (ret)
		goto disable_clk_ahb;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	dma_async_tx_descriptor_init(&sdmac->desc, chan);
	sdmac->desc.tx_submit = sdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	sdmac->desc.flags = DMA_CTRL_ACK;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}
static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel(chan);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	dma_free_coherent(sdma->dev, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int ret, i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags = 0;

	sdmac->buf_tail = 0;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);

	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (sg_len > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, sg_len, NUM_BD);
		ret = -EINVAL;
		goto err_out;
	}

	sdmac->chn_count = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > 0xffff) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
					channel, count, 0xffff);
			ret = -EINVAL;
			goto err_out;
		}

		bd->mode.count = count;
		sdmac->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
			ret = -EINVAL;
			goto err_out;
		}

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				return NULL;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				return NULL;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			return NULL;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
				i, count, (u64)sg->dma_address,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	sdmac->num_bd = sg_len;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	sdmac->buf_tail = 0;
	sdmac->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_out;
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	sdmac->num_bd = num_periods;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
static int sdma_config(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		sdmac->per_address = dmaengine_cfg->src_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
			dmaengine_cfg->src_addr_width;
		sdmac->word_size = dmaengine_cfg->src_addr_width;
	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
		sdmac->per_address2 = dmaengine_cfg->src_addr;
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
			SDMA_WATERMARK_LEVEL_LWML;
		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
			SDMA_WATERMARK_LEVEL_HWML;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	} else {
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
			dmaengine_cfg->dst_addr_width;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	}
	sdmac->direction = dmaengine_cfg->direction;
	return sdma_config_channel(chan);
}
static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	u32 residue;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		residue = (sdmac->num_bd - sdmac->buf_tail) *
			  sdmac->period_len - sdmac->chn_real_count;
	else
		residue = sdmac->chn_count - sdmac->chn_real_count;

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}
static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	if (sdmac->status == DMA_IN_PROGRESS)
		sdma_enable_channel(sdma, sdmac->channel);
}
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (s32 *)addr;
	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}
static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			header->ram_code_size,
			addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
		 header->version_major,
		 header->version_minor);

err_firmware:
	release_firmware(fw);
}
#define EVENT_REMAP_CELLS 3

static int sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
	struct property *event_remap;
	struct regmap *gpr;
	char propname[] = "fsl,sdma-event-remap";
	u32 reg, val, shift, num_map, i;
	int ret = 0;

	if (!np || !gpr_np)
		goto out;

	event_remap = of_find_property(np, propname, NULL);
	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
	if (!num_map) {
		dev_dbg(sdma->dev, "no event needs to be remapped\n");
		goto out;
	} else if (num_map % EVENT_REMAP_CELLS) {
		dev_err(sdma->dev, "the property %s must be a multiple of %d cells\n",
			propname, EVENT_REMAP_CELLS);
		ret = -EINVAL;
		goto out;
	}

	gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(gpr)) {
		dev_err(sdma->dev, "failed to get gpr regmap\n");
		ret = PTR_ERR(gpr);
		goto out;
	}

	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
		ret = of_property_read_u32_index(np, propname, i, &reg);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 1);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 2, &val);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
				propname, i + 2);
			goto out;
		}

		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
	}

out:
	of_node_put(gpr_np);

	return ret;
}
static int sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}
static int sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	ret = clk_enable(sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	/* Be sure SDMA has not started yet */
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(sdma->dev,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);
	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)
		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdma->clk_ipg);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = fn_param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	sdmac->data = *data;
	chan->private = &sdmac->data;

	return true;
}
static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
				   struct of_dma *ofdma)
{
	struct sdma_engine *sdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
	struct imx_dma_data data;

	if (dma_spec->args_count != 3)
		return NULL;

	data.dma_request = dma_spec->args[0];
	data.peripheral_type = dma_spec->args[1];
	data.priority = dma_spec->args[2];
	/*
	 * init dma_request2 to zero, which is not used by the dts.
	 * For P2P, dma_request2 is init from dma_request_channel(),
	 * chan->private will point to the imx_dma_data, and in
	 * device_alloc_chan_resources(), imx_dma_data.dma_request2 will
	 * be copied to sdmac->event_id1.
	 */
	data.dma_request2 = 0;

	return dma_request_channel(mask, sdma_filter_fn, &data);
}
static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	clk_prepare(sdma->clk_ipg);
	clk_prepare(sdma->clk_ahb);

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		return ret;

	sdma->irq = irq;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs)
		return -ENOMEM;

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		dma_cookie_init(&sdmac->chan);
		sdmac->channel = i;

		tasklet_init(&sdmac->tasklet, mxc_sdma_handle_channel_normal,
			     (unsigned long) sdmac);
		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also
		 * means that channel 0 in dmaengine counting matches sdma
		 * channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because that device tree does not encode ROM script address,
		 * the RAM script in firmware is mandatory for device tree
		 * probe, otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel;
	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
	return ret;
}
static int sdma_remove(struct platform_device *pdev)
{
	struct sdma_engine *sdma = platform_get_drvdata(pdev);
	int i;

	devm_free_irq(&pdev->dev, sdma->irq, sdma);
	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);

	/* Kill the tasklet */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		tasklet_kill(&sdmac->tasklet);
	}

	platform_set_drvdata(pdev, NULL);
	return 0;
}

static struct platform_driver sdma_driver = {
	.driver		= {
		.name	= "imx-sdma",
		.of_match_table = sdma_dt_ids,
	},
	.id_table	= sdma_devtypes,
	.remove		= sdma_remove,
	.probe		= sdma_probe,
};

module_platform_driver(sdma_driver);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX SDMA driver");
MODULE_LICENSE("GPL");