/*
 * drivers/dma/imx-sdma.c
 *
 * This file contains a driver for the Freescale Smart DMA engine
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * Based on code from Freescale:
 *
 * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50

/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR             0x01
#define C0_LOAD             0x02
#define C0_DUMP             0x03
#define C0_SETCTX           0x07
#define C0_GETCTX           0x03
#define C0_SETDM            0x01
#define C0_SETPM            0x04
#define C0_GETDM            0x02
#define C0_GETPM            0x08

/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS   0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
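
/*
 * Illustrative only (values are assumptions, not taken from this file):
 * for a peripheral-to-peripheral channel configured with src_maxburst = 6
 * and dst_maxburst = 8, sdma_config() below initially composes the
 * watermark word as
 *
 *	(6 & SDMA_WATERMARK_LEVEL_LWML) |
 *	((8 << 16) & SDMA_WATERMARK_LEVEL_HWML)
 *
 * before sdma_set_watermarklevel_for_p2p() ORs in the SP/DP, LWE/HWE and
 * CONT flags.
 */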

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr	current buffer descriptor processed
 * @base_bd_ptr		first element of buffer descriptor array
 * @unused		padding. The SDMA engine expects an array of 128 byte
 *			sized structure, so we have to pad this.
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @sf:		source fault while loading data
 * @spc:	loop mode program counter
 * @df:		destination fault while storing data
 * @epc:	loop mode end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc     :14;
	u32 unused1: 1;
	u32 t      : 1;
	u32 rpc    :14;
	u32 unused0: 1;
	u32 sf     : 1;
	u32 spc    :14;
	u32 unused2: 1;
	u32 df     : 1;
	u32 epc    :14;
	u32 lm     : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 */
struct sdma_context_data {
	struct sdma_state_registers channel_state;
	u32 gReg[8];
	u32 mda;
	u32 msa;
	u32 ms;
	u32 md;
	u32 pda;
	u32 psa;
	u32 ps;
	u32 pd;
	u32 ca;
	u32 cs;
	u32 dda;
	u32 dsa;
	u32 ds;
	u32 dd;
	u32 scratch0;
	u32 scratch1;
	u32 scratch2;
	u32 scratch3;
	u32 scratch4;
	u32 scratch5;
	u32 scratch6;
	u32 scratch7;
} __attribute__ ((packed));

#define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

struct sdma_engine;

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @sdma		pointer to the SDMA engine for this channel
 * @channel		the channel number, matches dmaengine chan_id + 1
 * @direction		transfer type. Needed for setting SDMA script
 * @peripheral_type	Peripheral type. Needed for setting SDMA script
 * @event_id0		aka dma request line
 * @event_id1		for channels that use 2 events
 * @word_size		peripheral access size
 * @buf_tail		ID of the buffer that was processed
 * @num_bd		max NUM_BD. number of descriptors currently handling
 */
struct sdma_channel {
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			buf_tail;
	unsigned int			num_bd;
	unsigned int			period_len;
	struct sdma_buffer_descriptor	*bd;
	dma_addr_t			bd_phys;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	enum dma_status			status;
	unsigned int			chn_count;
	unsigned int			chn_real_count;
	struct tasklet_struct		tasklet;
	struct imx_dma_data		data;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic		"SDMA"
 * @version_major	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor	firmware minor version (for binary compatible changes)
 * @script_addrs_start	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs	Number of script addresses in this image
 * @ram_code_start	offset of SDMA ram image in this image
 * @ram_code_size	size of SDMA ram image
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
};

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
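
/*
 * Illustrative only: a device tree node that this table would match (the
 * unit address, interrupt and clock specifiers below are assumptions, not
 * taken from this file; "ipg"/"ahb" clock names, #dma-cells = <3> and
 * fsl,sdma-ram-script-name match what sdma_probe() and sdma_xlate() use):
 *
 *	sdma: sdma@020ec000 {
 *		compatible = "fsl,imx6q-sdma";
 *		reg = <0x020ec000 0x4000>;
 *		interrupts = <0 2 0x04>;
 *		clocks = <&clks 155>, <&clks 155>;
 *		clock-names = "ipg", "ahb";
 *		#dma-cells = <3>;
 *		fsl,sdma-ram-script-name = "imx/sdma/sdma-imx6q.bin";
 *	};
 */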

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected */

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;

	return chnenbl0 + event * 4;
}

static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	unsigned long timeout = 500;

	sdma_enable_channel(sdma, 0);

	while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		udelay(1);
	}

	if (ret) {
		/* Clear the interrupt status */
		writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
	} else {
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
	}

	/* Set bits of CONFIG register with dynamic context switching */
	if (readl(sdma->regs + SDMA_H_CONFIG) == 0)
		writel_relaxed(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG);

	return ret ? 0 : -ETIMEDOUT;
}

static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
		u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(NULL, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt)
		return -ENOMEM;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(NULL, size, buf_virt, buf_phys);

	return ret;
}

static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
{
	if (sdmac->desc.callback)
		sdmac->desc.callback(sdmac->desc.callback_param);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (1) {
		bd = &sdmac->bd[sdmac->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR)
			sdmac->status = DMA_ERROR;

		bd->mode.status |= BD_DONE;
		sdmac->buf_tail++;
		sdmac->buf_tail %= sdmac->num_bd;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->num_bd; i++) {
		bd = &sdmac->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;

	dma_cookie_complete(&sdmac->desc);
	if (sdmac->desc.callback)
		sdmac->desc.callback(sdmac->desc.callback_param);
}

static void sdma_tasklet(unsigned long data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		sdma_handle_channel_loop(sdmac);
	else
		mxc_sdma_handle_channel_normal(sdmac);
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	/* not interested in channel 0 interrupts */
	stat &= ~1;
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];

		if (sdmac->flags & IMX_DMA_SG_LOOP)
			sdma_update_channel_loop(sdmac);

		tasklet_schedule(&sdmac->tasklet);

		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
		enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
	int ret;
	unsigned long flags;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/* Send by context the event mask, base address for peripheral
	 * and watermark level
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}

static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need
	 * swap LWML and HWML of INFO(A.3.2.5.1), also need swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
					    SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
	    sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
	    sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}

static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	if (sdmac->event_id0) {
		if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id0);
	}

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
	    (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}

static int sdma_set_channel_priority(struct sdma_channel *sdmac,
		unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}

static int sdma_request_channel(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int ret = -EBUSY;

	sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys,
					GFP_KERNEL);
	if (!sdmac->bd) {
		ret = -ENOMEM;
		goto out;
	}

	sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY);
	return 0;
out:

	return ret;
}

static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	unsigned long flags;
	struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irqsave(&sdmac->lock, flags);

	cookie = dma_cookie_assign(tx);

	spin_unlock_irqrestore(&sdmac->lock, flags);

	return cookie;
}

static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	int prio, ret;

	if (!data)
		return -EINVAL;

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_request_channel(sdmac);
	if (ret)
		goto disable_clk_ahb;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	dma_async_tx_descriptor_init(&sdmac->desc, chan);
	sdmac->desc.tx_submit = sdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	sdmac->desc.flags = DMA_CTRL_ACK;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}

static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_disable_channel(chan);

	if (sdmac->event_id0)
		sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;

	sdma_set_channel_priority(sdmac, 0);

	dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}

static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int ret, i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;
	sdmac->status = DMA_IN_PROGRESS;

	sdmac->flags = 0;

	sdmac->buf_tail = 0;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
			sg_len, channel);

	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (sg_len > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, sg_len, NUM_BD);
		ret = -EINVAL;
		goto err_out;
	}

	sdmac->chn_count = 0;
	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > 0xffff) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
					channel, count, 0xffff);
			ret = -EINVAL;
			goto err_out;
		}

		bd->mode.count = count;
		sdmac->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
			ret = -EINVAL;
			goto err_out;
		}

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3) {
				ret = -EINVAL;
				goto err_out;
			}
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1) {
				ret = -EINVAL;
				goto err_out;
			}
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			ret = -EINVAL;
			goto err_out;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
				i, count, (u64)sg->dma_address,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	sdmac->num_bd = sg_len;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int ret, i = 0, buf = 0;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	if (sdmac->status == DMA_IN_PROGRESS)
		return NULL;

	sdmac->status = DMA_IN_PROGRESS;

	sdmac->buf_tail = 0;
	sdmac->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;
	sdmac->direction = direction;
	ret = sdma_load_context(sdmac);
	if (ret)
		goto err_out;

	if (num_periods > NUM_BD) {
		dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n",
				channel, num_periods, NUM_BD);
		goto err_out;
	}

	if (period_len > 0xffff) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
				channel, period_len, 0xffff);
		goto err_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
				i, period_len, (u64)dma_addr,
				param & BD_WRAP ? "wrap" : "",
				param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	sdmac->num_bd = num_periods;
	sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys;

	return &sdmac->desc;
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
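
/*
 * A minimal client-side sketch of driving the cyclic path above through the
 * generic dmaengine API (error handling elided; "chan", "fifo_phys",
 * "buf_phys", "buf_len", "period_len", "period_done" and "my_ctx" are
 * assumptions for illustration, not names from this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction      = DMA_DEV_TO_MEM,
 *		.src_addr       = fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst   = 6,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len,
 *					 period_len, DMA_DEV_TO_MEM, 0);
 *	desc->callback = period_done;	// invoked once per BD_INTR period
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */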

static int sdma_config(struct dma_chan *chan,
		struct dma_slave_config *dmaengine_cfg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
		sdmac->per_address = dmaengine_cfg->src_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
			dmaengine_cfg->src_addr_width;
		sdmac->word_size = dmaengine_cfg->src_addr_width;
	} else if (dmaengine_cfg->direction == DMA_DEV_TO_DEV) {
		sdmac->per_address2 = dmaengine_cfg->src_addr;
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
			SDMA_WATERMARK_LEVEL_LWML;
		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
			SDMA_WATERMARK_LEVEL_HWML;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	} else {
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
			dmaengine_cfg->dst_addr_width;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	}
	sdmac->direction = dmaengine_cfg->direction;
	return sdma_config_channel(chan);
}

static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	u32 residue;

	if (sdmac->flags & IMX_DMA_SG_LOOP)
		residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
	else
		residue = sdmac->chn_count - sdmac->chn_real_count;

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}

static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	if (sdmac->status == DMA_IN_PROGRESS)
		sdma_enable_channel(sdma, sdmac->channel);
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (u32 *)addr;
	s32 *saddr_arr = (u32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}

static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			header->ram_code_size,
			addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
			header->version_major,
			header->version_minor);

err_firmware:
	release_firmware(fw);
}

#define EVENT_REMAP_CELLS 3

static int __init sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
	struct property *event_remap;
	struct regmap *gpr;
	char propname[] = "fsl,sdma-event-remap";
	u32 reg, val, shift, num_map, i;
	int ret = 0;

	/* of_parse_phandle() returns NULL (not an ERR_PTR) on failure */
	if (!np || !gpr_np)
		goto out;

	event_remap = of_find_property(np, propname, NULL);
	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
	if (!num_map) {
		dev_warn(sdma->dev, "no event needs to be remapped\n");
		goto out;
	} else if (num_map % EVENT_REMAP_CELLS) {
		dev_err(sdma->dev, "the property %s must modulo %d\n",
				propname, EVENT_REMAP_CELLS);
		ret = -EINVAL;
		goto out;
	}

	gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(gpr)) {
		dev_err(sdma->dev, "failed to get gpr regmap\n");
		ret = PTR_ERR(gpr);
		goto out;
	}

	for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
		ret = of_property_read_u32_index(np, propname, i, &reg);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
					propname, i);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 1, &shift);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
					propname, i + 1);
			goto out;
		}

		ret = of_property_read_u32_index(np, propname, i + 2, &val);
		if (ret) {
			dev_err(sdma->dev, "failed to read property %s index %d\n",
					propname, i + 2);
			goto out;
		}

		regmap_update_bits(gpr, reg, BIT(shift), val << shift);
	}

out:
	if (gpr_np)
		of_node_put(gpr_np);

	return ret;
}

static int sdma_get_firmware(struct sdma_engine *sdma,
		const char *fw_name)
{
	int ret;

	ret = request_firmware_nowait(THIS_MODULE,
			FW_ACTION_HOTPLUG, fw_name, sdma->dev,
			GFP_KERNEL, sdma, sdma_load_firmware);

	return ret;
}

static int sdma_init(struct sdma_engine *sdma)
{
	int i, ret;
	dma_addr_t ccb_phys;

	ret = clk_enable(sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	/* Be sure SDMA has not started yet */
	writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);

	sdma->channel_control = dma_alloc_coherent(NULL,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) +
			sizeof(struct sdma_context_data),
			&ccb_phys, GFP_KERNEL);

	if (!sdma->channel_control) {
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	sdma->context = (void *)sdma->channel_control +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);
	sdma->context_phys = ccb_phys +
		MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control);

	/* Zero-out the CCB structures array just allocated */
	memset(sdma->channel_control, 0,
			MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control));

	/* disable all channels */
	for (i = 0; i < sdma->drvdata->num_events; i++)
		writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));

	/* All channels have priority 0 */
	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);

	ret = sdma_request_channel(&sdma->channel[0]);
	if (ret)
		goto err_dma_alloc;

	sdma_config_ownership(&sdma->channel[0], false, true, false);

	/* Set Command Channel (Channel Zero) */
	writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);

	/* Set bits of CONFIG register but with static context switching */
	/* FIXME: Check whether to set ACR bit depending on clock ratios */
	writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);

	writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);

	/* Initializes channel's priorities */
	sdma_set_channel_priority(&sdma->channel[0], 7);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	return 0;

err_dma_alloc:
	clk_disable(sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdma->clk_ipg);
	dev_err(sdma->dev, "initialisation failed with %d\n", ret);
	return ret;
}

static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = fn_param;

	if (!imx_dma_is_general_purpose(chan))
		return false;

	sdmac->data = *data;
	chan->private = &sdmac->data;

	return true;
}

static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
				   struct of_dma *ofdma)
{
	struct sdma_engine *sdma = ofdma->of_dma_data;
	dma_cap_mask_t mask = sdma->dma_device.cap_mask;
	struct imx_dma_data data;

	if (dma_spec->args_count != 3)
		return NULL;

	data.dma_request = dma_spec->args[0];
	data.peripheral_type = dma_spec->args[1];
	data.priority = dma_spec->args[2];
	/*
	 * init dma_request2 to zero, it's not used by the dts.
	 * data.dma_request2 should only be initialized by the
	 * p2p case (filled in by the filter function).
	 */
	data.dma_request2 = 0;

	return dma_request_channel(mask, sdma_filter_fn, &data);
}
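
/*
 * Illustrative only: with #dma-cells = <3>, a client node requests a channel
 * as <&sdma request-id peripheral-type priority>, matching
 * dma_spec->args[0..2] decoded above. The event and type numbers below are
 * assumptions, not values from this file:
 *
 *	dmas = <&sdma 25 4 0>, <&sdma 26 4 0>;
 *	dma-names = "rx", "tx";
 */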

static int sdma_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
			of_match_device(sdma_dt_ids, &pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct device_node *spba_bus;
	const char *fw_name;
	int ret;
	int irq;
	struct resource *iores;
	struct resource spba_res;
	struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int i;
	struct sdma_engine *sdma;
	s32 *saddr_arr;
	const struct sdma_driver_data *drvdata = NULL;

	if (of_id)
		drvdata = of_id->data;
	else if (pdev->id_entry)
		drvdata = (void *)pdev->id_entry->driver_data;

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma)
		return -ENOMEM;

	spin_lock_init(&sdma->channel_0_lock);

	sdma->dev = &pdev->dev;
	sdma->drvdata = drvdata;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(sdma->regs))
		return PTR_ERR(sdma->regs);

	sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(sdma->clk_ipg))
		return PTR_ERR(sdma->clk_ipg);

	sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(sdma->clk_ahb))
		return PTR_ERR(sdma->clk_ahb);

	clk_prepare(sdma->clk_ipg);
	clk_prepare(sdma->clk_ahb);

	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
			       sdma);
	if (ret)
		return ret;

	sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
	if (!sdma->script_addrs)
		return -ENOMEM;

	/* initially no scripts available */
	saddr_arr = (s32 *)sdma->script_addrs;
	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
		saddr_arr[i] = -EINVAL;

	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);

	INIT_LIST_HEAD(&sdma->dma_device.channels);
	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		sdmac->sdma = sdma;
		spin_lock_init(&sdmac->lock);

		sdmac->chan.device = &sdma->dma_device;
		dma_cookie_init(&sdmac->chan);
		sdmac->channel = i;

		tasklet_init(&sdmac->tasklet, sdma_tasklet,
			     (unsigned long) sdmac);
		/*
		 * Add the channel to the DMAC list. Do not add channel 0 though
		 * because we need it internally in the SDMA driver. This also
		 * means that channel 0 in dmaengine counting matches sdma
		 * channel 1.
		 */
		if (i)
			list_add_tail(&sdmac->chan.device_node,
					&sdma->dma_device.channels);
	}

	ret = sdma_init(sdma);
	if (ret)
		goto err_init;

	ret = sdma_event_remap(sdma);
	if (ret)
		goto err_init;

	if (sdma->drvdata->script_addrs)
		sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
	if (pdata && pdata->script_addrs)
		sdma_add_scripts(sdma, pdata->script_addrs);

	if (pdata) {
		ret = sdma_get_firmware(sdma, pdata->fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
	} else {
		/*
		 * Because that device tree does not encode ROM script address,
		 * the RAM script in firmware is mandatory for device tree
		 * probe, otherwise it fails.
		 */
		ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
					      &fw_name);
		if (ret)
			dev_warn(&pdev->dev, "failed to get firmware name\n");
		else {
			ret = sdma_get_firmware(sdma, fw_name);
			if (ret)
				dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
		}
	}

	sdma->dma_device.dev = &pdev->dev;

	sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
	sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
	sdma->dma_device.device_tx_status = sdma_tx_status;
	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
	sdma->dma_device.device_config = sdma_config;
	sdma->dma_device.device_terminate_all = sdma_disable_channel;
	sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	sdma->dma_device.device_issue_pending = sdma_issue_pending;
	sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
	dma_set_max_seg_size(sdma->dma_device.dev, 65535);

	platform_set_drvdata(pdev, sdma);

	ret = dma_async_device_register(&sdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	if (np) {
		ret = of_dma_controller_register(np, sdma_xlate, sdma);
		if (ret) {
			dev_err(&pdev->dev, "failed to register controller\n");
			goto err_register;
		}

		spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
		ret = of_address_to_resource(spba_bus, 0, &spba_res);
		if (!ret) {
			sdma->spba_start_addr = spba_res.start;
			sdma->spba_end_addr = spba_res.end;
		}
		of_node_put(spba_bus);
	}

	dev_info(sdma->dev, "initialized\n");

	return 0;

err_register:
	dma_async_device_unregister(&sdma->dma_device);
err_init:
	kfree(sdma->script_addrs);
	return ret;
}

static int sdma_remove(struct platform_device *pdev)
{
	struct sdma_engine *sdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&sdma->dma_device);
	kfree(sdma->script_addrs);
	/* Kill the tasklet */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct sdma_channel *sdmac = &sdma->channel[i];

		tasklet_kill(&sdmac->tasklet);
	}

	platform_set_drvdata(pdev, NULL);
	dev_info(&pdev->dev, "Removed...\n");
	return 0;
}
1858
1859static struct platform_driver sdma_driver = {
1860 .driver = {
1861 .name = "imx-sdma",
1862 .of_match_table = sdma_dt_ids,
1863 },
1864 .id_table = sdma_devtypes,
1865 .remove = sdma_remove,
1866 .probe = sdma_probe,
1867};
1868
1869module_platform_driver(sdma_driver);
1870
1871MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
1872MODULE_DESCRIPTION("i.MX SDMA driver");
1873MODULE_LICENSE("GPL");
1874