1
2
3
4
5
6
7
8
9
10
11
12
13
14#include <cpu_func.h>
15#include <asm/cache.h>
16#include <linux/list.h>
17
18#include <common.h>
19#include <malloc.h>
20#include <linux/errno.h>
21#include <asm/io.h>
22#include <asm/arch/clock.h>
23#include <asm/arch/imx-regs.h>
24#include <asm/arch/sys_proto.h>
25#include <asm/mach-imx/dma.h>
26#include <asm/mach-imx/regs-apbh.h>
27
28static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
29
30
31
32
33int mxs_dma_validate_chan(int channel)
34{
35 struct mxs_dma_chan *pchan;
36
37 if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
38 return -EINVAL;
39
40 pchan = mxs_dma_channels + channel;
41 if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
42 return -EINVAL;
43
44 return 0;
45}
46
47
48
49
50static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
51{
52 return desc->address + offsetof(struct mxs_dma_desc, cmd);
53}
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69static int mxs_dma_read_semaphore(int channel)
70{
71 struct mxs_apbh_regs *apbh_regs =
72 (struct mxs_apbh_regs *)MXS_APBH_BASE;
73 uint32_t tmp;
74 int ret;
75
76 ret = mxs_dma_validate_chan(channel);
77 if (ret)
78 return ret;
79
80 tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);
81
82 tmp &= APBH_CHn_SEMA_PHORE_MASK;
83 tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;
84
85 return tmp;
86}
87
88#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
89void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
90{
91 uint32_t addr;
92 uint32_t size;
93
94 addr = (uintptr_t)desc;
95 size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
96
97 flush_dcache_range(addr, addr + size);
98}
99#else
100inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
101#endif
102
103
104
105
106
107
108
109
110
111
/*
 * Start DMA channel "channel", or hand newly appended descriptors to an
 * already-running channel.
 *
 * Descriptors queued via mxs_dma_desc_append() since the last enable
 * (pchan->pending_num of them) are committed to hardware by raising the
 * channel semaphore.  Returns 0 on success, -EINVAL for a bad or
 * unallocated channel, -EFAULT if the active list is unexpectedly empty.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Nothing new queued: just mark the channel busy. */
	if (pchan->pending_num == 0) {
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/* A running channel can only take chained commands. */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Only one command left in flight: repoint the
			 * engine's next-command register at the
			 * following descriptor before bumping the
			 * semaphore.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		/* Commit the pending descriptors to the engine. */
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/*
		 * Idle channel: program the first descriptor, raise the
		 * semaphore, then ungate the channel clock to start it.
		 */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183static int mxs_dma_disable(int channel)
184{
185 struct mxs_dma_chan *pchan;
186 struct mxs_apbh_regs *apbh_regs =
187 (struct mxs_apbh_regs *)MXS_APBH_BASE;
188 int ret;
189
190 ret = mxs_dma_validate_chan(channel);
191 if (ret)
192 return ret;
193
194 pchan = mxs_dma_channels + channel;
195
196 if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
197 return -EINVAL;
198
199 writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
200 &apbh_regs->hw_apbh_ctrl0_set);
201
202 pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
203 pchan->active_num = 0;
204 pchan->pending_num = 0;
205 list_splice_init(&pchan->active, &pchan->done);
206
207 return 0;
208}
209
210
211
212
/*
 * Reset DMA channel "channel".
 *
 * The per-channel reset bit lives in different registers depending on
 * the SoC: CTRL0 on i.MX23, CHANNEL_CTRL on i.MX28/6/7/8/8M.
 * Returns 0 on success or -EINVAL for a bad/unallocated channel.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7) || \
	defined(CONFIG_IMX8) || defined(CONFIG_IMX8M)
	u32 setreg = (uintptr_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	u32 offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	/* Write the reset bit through the SoC-specific SET register. */
	writel(1 << (channel + offset), (uintptr_t)setreg);

	return 0;
}
235
236
237
238
239
240
241static int mxs_dma_enable_irq(int channel, int enable)
242{
243 struct mxs_apbh_regs *apbh_regs =
244 (struct mxs_apbh_regs *)MXS_APBH_BASE;
245 int ret;
246
247 ret = mxs_dma_validate_chan(channel);
248 if (ret)
249 return ret;
250
251 if (enable)
252 writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
253 &apbh_regs->hw_apbh_ctrl1_set);
254 else
255 writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
256 &apbh_regs->hw_apbh_ctrl1_clr);
257
258 return 0;
259}
260
261
262
263
264
265
266
267static int mxs_dma_ack_irq(int channel)
268{
269 struct mxs_apbh_regs *apbh_regs =
270 (struct mxs_apbh_regs *)MXS_APBH_BASE;
271 int ret;
272
273 ret = mxs_dma_validate_chan(channel);
274 if (ret)
275 return ret;
276
277 writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
278 writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);
279
280 return 0;
281}
282
283
284
285
286static int mxs_dma_request(int channel)
287{
288 struct mxs_dma_chan *pchan;
289
290 if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
291 return -EINVAL;
292
293 pchan = mxs_dma_channels + channel;
294 if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
295 return -ENODEV;
296
297 if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
298 return -EBUSY;
299
300 pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
301 pchan->active_num = 0;
302 pchan->pending_num = 0;
303
304 INIT_LIST_HEAD(&pchan->active);
305 INIT_LIST_HEAD(&pchan->done);
306
307 return 0;
308}
309
310
311
312
313
314
315
316
317
318int mxs_dma_release(int channel)
319{
320 struct mxs_dma_chan *pchan;
321 int ret;
322
323 ret = mxs_dma_validate_chan(channel);
324 if (ret)
325 return ret;
326
327 pchan = mxs_dma_channels + channel;
328
329 if (pchan->flags & MXS_DMA_FLAGS_BUSY)
330 return -EBUSY;
331
332 pchan->dev = 0;
333 pchan->active_num = 0;
334 pchan->pending_num = 0;
335 pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;
336
337 return 0;
338}
339
340
341
342
343struct mxs_dma_desc *mxs_dma_desc_alloc(void)
344{
345 struct mxs_dma_desc *pdesc;
346 uint32_t size;
347
348 size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
349 pdesc = memalign(MXS_DMA_ALIGNMENT, size);
350
351 if (pdesc == NULL)
352 return NULL;
353
354 memset(pdesc, 0, sizeof(*pdesc));
355 pdesc->address = (dma_addr_t)pdesc;
356
357 return pdesc;
358};
359
360
361
362
/*
 * Release a descriptor obtained from mxs_dma_desc_alloc().
 *
 * free(NULL) is defined as a no-op, so the previous explicit NULL
 * guard was redundant and has been dropped.
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	free(pdesc);
}
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
/*
 * Queue descriptor "pdesc" on channel "channel".
 *
 * The descriptor is linked onto the tail of the channel's active list.
 * If a descriptor is already queued, the two are chained in hardware by
 * patching the previous tail's "next" pointer and setting its CHAIN
 * bit.  Every touched descriptor is flushed to memory for the engine.
 * The transfer does not start until mxs_dma_enable() is called.
 *
 * Returns 0 on success or -EINVAL for a bad/unallocated channel.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Self-link; assume this is both first and last for now. */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		/* Chain behind the current tail descriptor instead. */
		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		/* The engine must see the patched tail descriptor. */
		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	/* Only the head of a chain counts as a pending command. */
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
446
447
448
449
450
451
452
453
454
455
456
457
/*
 * Retire completed descriptors on channel "channel".
 *
 * Compares the hardware semaphore (commands still outstanding) with
 * the driver's active count and moves the finished descriptors to
 * "head" if supplied, otherwise to the channel's internal "done" list.
 * Clears the BUSY flag once the semaphore reaches zero.
 *
 * Returns 0 on success or a negative error code.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	/* Nothing has completed since the last call. */
	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		/* Stop once only the still-running commands remain. */
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		/* Each LAST-flagged descriptor ends one HW command. */
		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}
500
501
502
503
504static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
505{
506 struct mxs_apbh_regs *apbh_regs =
507 (struct mxs_apbh_regs *)MXS_APBH_BASE;
508 int ret;
509
510 ret = mxs_dma_validate_chan(chan);
511 if (ret)
512 return ret;
513
514 if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
515 1 << chan, timeout)) {
516 ret = -ETIMEDOUT;
517 mxs_dma_reset(chan);
518 }
519
520 return ret;
521}
522
523
524
525
526int mxs_dma_go(int chan)
527{
528 uint32_t timeout = 10000000;
529 int ret;
530
531 LIST_HEAD(tmp_desc_list);
532
533 mxs_dma_enable_irq(chan, 1);
534 mxs_dma_enable(chan);
535
536
537 ret = mxs_dma_wait_complete(timeout, chan);
538
539
540 mxs_dma_finish(chan, &tmp_desc_list);
541
542
543 mxs_dma_ack_irq(chan);
544 mxs_dma_reset(chan);
545 mxs_dma_enable_irq(chan, 0);
546 mxs_dma_disable(chan);
547
548 return ret;
549}
550
551
552
553
554
555
556
/*
 * Start circular DMA on "chan" with descriptor "pdesc".
 *
 * Flushes the descriptor, enables the completion IRQ, programs the
 * engine's next-command register, raises the semaphore, and ungates
 * the channel clock.  Note: the per-channel active/pending list
 * bookkeeping used by mxs_dma_enable()/mxs_dma_finish() is bypassed.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	/* Ungating the clock starts execution. */
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}
572
573
574
575
/*
 * Reset the APBH DMA block and configure its AHB/APB burst modes
 * according to the board configuration options.
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

	/* AHB burst-of-8 mode: set or clear per config. */
#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

	/* APB bus burst mode: set or clear per config. */
#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}
599
600int mxs_dma_init_channel(int channel)
601{
602 struct mxs_dma_chan *pchan;
603 int ret;
604
605 pchan = mxs_dma_channels + channel;
606 pchan->flags = MXS_DMA_FLAGS_VALID;
607
608 ret = mxs_dma_request(channel);
609
610 if (ret) {
611 printf("MXS DMA: Can't acquire DMA channel %i\n",
612 channel);
613 return ret;
614 }
615
616 mxs_dma_reset(channel);
617 mxs_dma_ack_irq(channel);
618
619 return 0;
620}
621