1
2
3
4
5
6
7
8
9
10
11
12#include <cpu_func.h>
13#include <linux/list.h>
14
15#include <common.h>
16#include <malloc.h>
17#include <linux/errno.h>
18#include <asm/io.h>
19#include <asm/arch/clock.h>
20#include <asm/arch/imx-regs.h>
21#include <asm/arch/sys_proto.h>
22#include <asm/mach-imx/dma.h>
23#include <asm/mach-imx/regs-apbh.h>
24
/* Per-channel software state for every APBH DMA channel, indexed by channel number. */
static struct mxs_dma_chan mxs_dma_channels[MXS_MAX_DMA_CHANNELS];
26
27
28
29
30int mxs_dma_validate_chan(int channel)
31{
32 struct mxs_dma_chan *pchan;
33
34 if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
35 return -EINVAL;
36
37 pchan = mxs_dma_channels + channel;
38 if (!(pchan->flags & MXS_DMA_FLAGS_ALLOCATED))
39 return -EINVAL;
40
41 return 0;
42}
43
44
45
46
47static unsigned int mxs_dma_cmd_address(struct mxs_dma_desc *desc)
48{
49 return desc->address + offsetof(struct mxs_dma_desc, cmd);
50}
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66static int mxs_dma_read_semaphore(int channel)
67{
68 struct mxs_apbh_regs *apbh_regs =
69 (struct mxs_apbh_regs *)MXS_APBH_BASE;
70 uint32_t tmp;
71 int ret;
72
73 ret = mxs_dma_validate_chan(channel);
74 if (ret)
75 return ret;
76
77 tmp = readl(&apbh_regs->ch[channel].hw_apbh_ch_sema);
78
79 tmp &= APBH_CHn_SEMA_PHORE_MASK;
80 tmp >>= APBH_CHn_SEMA_PHORE_OFFSET;
81
82 return tmp;
83}
84
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * Flush one DMA descriptor from the data cache so the APBH engine sees
 * the CPU's latest writes.  The flushed span is rounded up to
 * MXS_DMA_ALIGNMENT, matching the allocation size used by
 * mxs_dma_desc_alloc().
 */
void mxs_dma_flush_desc(struct mxs_dma_desc *desc)
{
	uint32_t addr;
	uint32_t size;

	addr = (uint32_t)desc;
	size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);

	flush_dcache_range(addr, addr + size);
}
#else
/* Data cache disabled: nothing to flush. */
inline void mxs_dma_flush_desc(struct mxs_dma_desc *desc) {}
#endif
99
100
101
102
103
104
105
106
107
108
/*
 * Kick (or re-kick) DMA on @channel.
 *
 * With no pending descriptors the channel is merely marked busy.
 * Otherwise:
 *  - on an idle channel, point the hardware at the head of the active
 *    list, bump the semaphore and ungate the channel clock;
 *  - on a busy channel, the chain may only be extended (the head
 *    descriptor must carry MXS_DMA_DESC_CHAIN); when the semaphore has
 *    dropped to 1 the engine is about to stop, so NXTCMDAR is
 *    re-pointed at the next descriptor before the new semaphore counts
 *    are added.
 *
 * Returns 0 on success, -EINVAL for a bad/unallocated channel, or
 * -EFAULT if the active list is unexpectedly empty.
 */
static int mxs_dma_enable(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	unsigned int sem;
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	if (pchan->pending_num == 0) {
		/* Nothing new to submit; just mark the channel busy. */
		pchan->flags |= MXS_DMA_FLAGS_BUSY;
		return 0;
	}

	pdesc = list_first_entry(&pchan->active, struct mxs_dma_desc, node);
	if (pdesc == NULL)
		return -EFAULT;

	if (pchan->flags & MXS_DMA_FLAGS_BUSY) {
		/* A running chain can only be appended to. */
		if (!(pdesc->cmd.data & MXS_DMA_DESC_CHAIN))
			return 0;

		sem = mxs_dma_read_semaphore(channel);
		if (sem == 0)
			return 0;

		if (sem == 1) {
			/*
			 * Engine is one decrement from stopping on the
			 * current descriptor; redirect it to the next
			 * descriptor before topping up the semaphore.
			 */
			pdesc = list_entry(pdesc->node.next,
					   struct mxs_dma_desc, node);
			writel(mxs_dma_cmd_address(pdesc),
			       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		}
		writel(pchan->pending_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
	} else {
		/* Idle channel: program the head descriptor and start. */
		pchan->active_num += pchan->pending_num;
		pchan->pending_num = 0;
		writel(mxs_dma_cmd_address(pdesc),
		       &apbh_regs->ch[channel].hw_apbh_ch_nxtcmdar);
		writel(pchan->active_num,
		       &apbh_regs->ch[channel].hw_apbh_ch_sema);
		/* Ungate the channel clock to let the engine run. */
		writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
		       &apbh_regs->hw_apbh_ctrl0_clr);
	}

	pchan->flags |= MXS_DMA_FLAGS_BUSY;
	return 0;
}
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180static int mxs_dma_disable(int channel)
181{
182 struct mxs_dma_chan *pchan;
183 struct mxs_apbh_regs *apbh_regs =
184 (struct mxs_apbh_regs *)MXS_APBH_BASE;
185 int ret;
186
187 ret = mxs_dma_validate_chan(channel);
188 if (ret)
189 return ret;
190
191 pchan = mxs_dma_channels + channel;
192
193 if (!(pchan->flags & MXS_DMA_FLAGS_BUSY))
194 return -EINVAL;
195
196 writel(1 << (channel + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
197 &apbh_regs->hw_apbh_ctrl0_set);
198
199 pchan->flags &= ~MXS_DMA_FLAGS_BUSY;
200 pchan->active_num = 0;
201 pchan->pending_num = 0;
202 list_splice_init(&pchan->active, &pchan->done);
203
204 return 0;
205}
206
207
208
209
/*
 * Reset one DMA channel via its per-channel reset bit.
 *
 * The reset register differs by SoC: i.MX23 uses HW_APBH_CTRL0, while
 * i.MX28/i.MX6/i.MX7 use HW_APBH_CHANNEL_CTRL.
 */
static int mxs_dma_reset(int channel)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;
	int ret;
#if defined(CONFIG_MX23)
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_ctrl0_set);
	uint32_t offset = APBH_CTRL0_RESET_CHANNEL_OFFSET;
#elif (defined(CONFIG_MX28) || defined(CONFIG_MX6) || defined(CONFIG_MX7))
	uint32_t setreg = (uint32_t)(&apbh_regs->hw_apbh_channel_ctrl_set);
	uint32_t offset = APBH_CHANNEL_CTRL_RESET_CHANNEL_OFFSET;
#endif

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	writel(1 << (channel + offset), setreg);

	return 0;
}
231
232
233
234
235
236
237static int mxs_dma_enable_irq(int channel, int enable)
238{
239 struct mxs_apbh_regs *apbh_regs =
240 (struct mxs_apbh_regs *)MXS_APBH_BASE;
241 int ret;
242
243 ret = mxs_dma_validate_chan(channel);
244 if (ret)
245 return ret;
246
247 if (enable)
248 writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
249 &apbh_regs->hw_apbh_ctrl1_set);
250 else
251 writel(1 << (channel + APBH_CTRL1_CH_CMDCMPLT_IRQ_EN_OFFSET),
252 &apbh_regs->hw_apbh_ctrl1_clr);
253
254 return 0;
255}
256
257
258
259
260
261
262
263static int mxs_dma_ack_irq(int channel)
264{
265 struct mxs_apbh_regs *apbh_regs =
266 (struct mxs_apbh_regs *)MXS_APBH_BASE;
267 int ret;
268
269 ret = mxs_dma_validate_chan(channel);
270 if (ret)
271 return ret;
272
273 writel(1 << channel, &apbh_regs->hw_apbh_ctrl1_clr);
274 writel(1 << channel, &apbh_regs->hw_apbh_ctrl2_clr);
275
276 return 0;
277}
278
279
280
281
282static int mxs_dma_request(int channel)
283{
284 struct mxs_dma_chan *pchan;
285
286 if ((channel < 0) || (channel >= MXS_MAX_DMA_CHANNELS))
287 return -EINVAL;
288
289 pchan = mxs_dma_channels + channel;
290 if ((pchan->flags & MXS_DMA_FLAGS_VALID) != MXS_DMA_FLAGS_VALID)
291 return -ENODEV;
292
293 if (pchan->flags & MXS_DMA_FLAGS_ALLOCATED)
294 return -EBUSY;
295
296 pchan->flags |= MXS_DMA_FLAGS_ALLOCATED;
297 pchan->active_num = 0;
298 pchan->pending_num = 0;
299
300 INIT_LIST_HEAD(&pchan->active);
301 INIT_LIST_HEAD(&pchan->done);
302
303 return 0;
304}
305
306
307
308
309
310
311
312
313
314int mxs_dma_release(int channel)
315{
316 struct mxs_dma_chan *pchan;
317 int ret;
318
319 ret = mxs_dma_validate_chan(channel);
320 if (ret)
321 return ret;
322
323 pchan = mxs_dma_channels + channel;
324
325 if (pchan->flags & MXS_DMA_FLAGS_BUSY)
326 return -EBUSY;
327
328 pchan->dev = 0;
329 pchan->active_num = 0;
330 pchan->pending_num = 0;
331 pchan->flags &= ~MXS_DMA_FLAGS_ALLOCATED;
332
333 return 0;
334}
335
336
337
338
339struct mxs_dma_desc *mxs_dma_desc_alloc(void)
340{
341 struct mxs_dma_desc *pdesc;
342 uint32_t size;
343
344 size = roundup(sizeof(struct mxs_dma_desc), MXS_DMA_ALIGNMENT);
345 pdesc = memalign(MXS_DMA_ALIGNMENT, size);
346
347 if (pdesc == NULL)
348 return NULL;
349
350 memset(pdesc, 0, sizeof(*pdesc));
351 pdesc->address = (dma_addr_t)pdesc;
352
353 return pdesc;
354};
355
356
357
358
/*
 * Free a descriptor obtained from mxs_dma_desc_alloc().
 *
 * free(NULL) is defined as a no-op, so a NULL argument is accepted
 * without an explicit guard.
 */
void mxs_dma_desc_free(struct mxs_dma_desc *pdesc)
{
	free(pdesc);
}
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
/*
 * Append @pdesc to @channel's active descriptor chain.
 *
 * The new descriptor is initially marked FIRST|LAST and made to point
 * at itself.  If the chain is non-empty, the previous tail is linked
 * to it (next pointer + CHAIN bit) and the FIRST/LAST flags are
 * adjusted accordingly.  pending_num is only bumped when the
 * descriptor still carries FIRST, i.e. it starts a new chain;
 * mxs_dma_enable() later converts pending into active counts.  Both
 * touched descriptors are cache-flushed for the engine.
 *
 * Returns 0 on success or -EINVAL for a bad/unallocated channel.
 */
int mxs_dma_desc_append(int channel, struct mxs_dma_desc *pdesc)
{
	struct mxs_dma_chan *pchan;
	struct mxs_dma_desc *last;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	/* Stand-alone default: self-linked, both first and last. */
	pdesc->cmd.next = mxs_dma_cmd_address(pdesc);
	pdesc->flags |= MXS_DMA_DESC_FIRST | MXS_DMA_DESC_LAST;

	if (!list_empty(&pchan->active)) {
		/* Splice onto the current tail of the chain. */
		last = list_entry(pchan->active.prev, struct mxs_dma_desc,
				  node);

		pdesc->flags &= ~MXS_DMA_DESC_FIRST;
		last->flags &= ~MXS_DMA_DESC_LAST;

		last->cmd.next = mxs_dma_cmd_address(pdesc);
		last->cmd.data |= MXS_DMA_DESC_CHAIN;

		/* The engine must see the updated tail descriptor. */
		mxs_dma_flush_desc(last);
	}
	pdesc->flags |= MXS_DMA_DESC_READY;
	if (pdesc->flags & MXS_DMA_DESC_FIRST)
		pchan->pending_num++;
	list_add_tail(&pdesc->node, &pchan->active);

	mxs_dma_flush_desc(pdesc);

	return ret;
}
442
443
444
445
446
447
448
449
450
451
452
453
/*
 * Reap the descriptors the engine has completed on @channel.
 *
 * The hardware semaphore counts chains still to be processed; every
 * descriptor at the head of the active list beyond that count is
 * finished.  Finished descriptors get their READY flag cleared and are
 * moved to @head, or to the channel's own done list when @head is
 * NULL.  active_num is decremented once per completed chain (on its
 * LAST descriptor).  A semaphore of zero means the engine has stopped,
 * so the BUSY flag is dropped as well.
 *
 * Returns 0 on success or a negative error code.
 */
static int mxs_dma_finish(int channel, struct list_head *head)
{
	int sem;
	struct mxs_dma_chan *pchan;
	struct list_head *p, *q;
	struct mxs_dma_desc *pdesc;
	int ret;

	ret = mxs_dma_validate_chan(channel);
	if (ret)
		return ret;

	pchan = mxs_dma_channels + channel;

	sem = mxs_dma_read_semaphore(channel);
	if (sem < 0)
		return sem;

	if (sem == pchan->active_num)
		return 0;

	list_for_each_safe(p, q, &pchan->active) {
		/* Stop once only still-running chains remain. */
		if ((pchan->active_num) <= sem)
			break;

		pdesc = list_entry(p, struct mxs_dma_desc, node);
		pdesc->flags &= ~MXS_DMA_DESC_READY;

		if (head)
			list_move_tail(p, head);
		else
			list_move_tail(p, &pchan->done);

		if (pdesc->flags & MXS_DMA_DESC_LAST)
			pchan->active_num--;
	}

	if (sem == 0)
		pchan->flags &= ~MXS_DMA_FLAGS_BUSY;

	return 0;
}
496
497
498
499
500static int mxs_dma_wait_complete(uint32_t timeout, unsigned int chan)
501{
502 struct mxs_apbh_regs *apbh_regs =
503 (struct mxs_apbh_regs *)MXS_APBH_BASE;
504 int ret;
505
506 ret = mxs_dma_validate_chan(chan);
507 if (ret)
508 return ret;
509
510 if (mxs_wait_mask_set(&apbh_regs->hw_apbh_ctrl1_reg,
511 1 << chan, timeout)) {
512 ret = -ETIMEDOUT;
513 mxs_dma_reset(chan);
514 }
515
516 return ret;
517}
518
519
520
521
/*
 * Run the descriptor chain queued on @chan to completion (synchronous,
 * polled).  Enables the completion IRQ bit (it is polled here, not
 * serviced), starts the engine, waits, reaps the finished descriptors
 * onto a throw-away local list, then fully quiesces the channel.
 *
 * Returns 0 on success or a negative error code (-ETIMEDOUT on
 * timeout).
 */
int mxs_dma_go(int chan)
{
	uint32_t timeout = 10000000;
	int ret;

	LIST_HEAD(tmp_desc_list);

	mxs_dma_enable_irq(chan, 1);
	mxs_dma_enable(chan);

	/* Wait for the transfer to complete (or time out). */
	ret = mxs_dma_wait_complete(timeout, chan);

	/* Move the completed descriptors off the active list. */
	mxs_dma_finish(chan, &tmp_desc_list);

	/* Shut the channel down again: ack, reset, mask IRQ, disable. */
	mxs_dma_ack_irq(chan);
	mxs_dma_reset(chan);
	mxs_dma_enable_irq(chan, 0);
	mxs_dma_disable(chan);

	return ret;
}
546
547
548
549
550
551
552
/*
 * Start @pdesc on @chan without any completion bookkeeping: flush the
 * descriptor, enable the channel IRQ, point the engine at the
 * descriptor, grant one semaphore count and ungate the channel clock.
 * NOTE(review): intended for circular operation — presumably the
 * caller chains the descriptor back onto itself; confirm at call
 * sites.
 */
void mxs_dma_circ_start(int chan, struct mxs_dma_desc *pdesc)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_dma_flush_desc(pdesc);

	mxs_dma_enable_irq(chan, 1);

	writel(mxs_dma_cmd_address(pdesc),
	       &apbh_regs->ch[chan].hw_apbh_ch_nxtcmdar);
	writel(1, &apbh_regs->ch[chan].hw_apbh_ch_sema);
	writel(1 << (chan + APBH_CTRL0_CLKGATE_CHANNEL_OFFSET),
	       &apbh_regs->hw_apbh_ctrl0_clr);
}
568
569
570
571
/*
 * One-time initialisation of the APBH DMA block: soft-reset the
 * controller, then configure the AHB burst-8 and APB burst modes
 * according to the board configuration.
 */
void mxs_dma_init(void)
{
	struct mxs_apbh_regs *apbh_regs =
		(struct mxs_apbh_regs *)MXS_APBH_BASE;

	mxs_reset_block(&apbh_regs->hw_apbh_ctrl0_reg);

#ifdef CONFIG_APBH_DMA_BURST8
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_AHB_BURST8_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif

#ifdef CONFIG_APBH_DMA_BURST
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_set);
#else
	writel(APBH_CTRL0_APB_BURST_EN,
	       &apbh_regs->hw_apbh_ctrl0_clr);
#endif
}
595
596int mxs_dma_init_channel(int channel)
597{
598 struct mxs_dma_chan *pchan;
599 int ret;
600
601 pchan = mxs_dma_channels + channel;
602 pchan->flags = MXS_DMA_FLAGS_VALID;
603
604 ret = mxs_dma_request(channel);
605
606 if (ret) {
607 printf("MXS DMA: Can't acquire DMA channel %i\n",
608 channel);
609 return ret;
610 }
611
612 mxs_dma_reset(channel);
613 mxs_dma_ack_irq(channel);
614
615 return 0;
616}
617