1
2
3
4
5
6
7
8
9#include <linux/errno.h>
10#include <linux/interrupt.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/param.h>
14#include <linux/proc_fs.h>
15#include <linux/sched.h>
16#include <linux/seq_file.h>
17#include <linux/spinlock.h>
18
19#include <asm/blackfin.h>
20#include <asm/cacheflush.h>
21#include <asm/dma.h>
22#include <asm/uaccess.h>
23#include <asm/early_printk.h>
24
25
26
27
28
29
/* Per-channel state for every hardware DMA channel, indexed by channel
 * number.  .regs points at the channel's memory-mapped register block
 * (filled in from dma_io_base_addr[] at init time). */
struct dma_channel dma_ch[MAX_DMA_CHANNELS];
EXPORT_SYMBOL(dma_ch);
32
33static int __init blackfin_dma_init(void)
34{
35 int i;
36
37 printk(KERN_INFO "Blackfin DMA Controller\n");
38
39 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
40 atomic_set(&dma_ch[i].chan_status, 0);
41 dma_ch[i].regs = dma_io_base_addr[i];
42 }
43
44 request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
45 request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy");
46
47#if defined(CONFIG_DEB_DMA_URGENT)
48 bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE()
49 | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT);
50#endif
51
52 return 0;
53}
54arch_initcall(blackfin_dma_init);
55
56#ifdef CONFIG_PROC_FS
57static int proc_dma_show(struct seq_file *m, void *v)
58{
59 int i;
60
61 for (i = 0; i < MAX_DMA_CHANNELS; ++i)
62 if (dma_channel_active(i))
63 seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
64
65 return 0;
66}
67
/* open() for /proc/dma: bind the single_open seq_file to proc_dma_show. */
static int proc_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_dma_show, NULL);
}
72
/* File operations for /proc/dma; read side handled by seq_file. */
static const struct file_operations proc_dma_operations = {
	.open		= proc_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
79
/* Create /proc/dma at late_initcall time.  Note the boolean-style return:
 * nonzero on success, 0 on failure (initcall return value is ignored for
 * this purpose, so the inversion is harmless). */
static int __init proc_dma_init(void)
{
	return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL;
}
late_initcall(proc_dma_init);
85#endif
86
87
88
89
90
91
92int request_dma(unsigned int channel, const char *device_id)
93{
94 pr_debug("request_dma() : BEGIN\n");
95
96 if (device_id == NULL)
97 printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
98
99#if defined(CONFIG_BF561) && ANOMALY_05000182
100 if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) {
101 if (get_cclk() > 500000000) {
102 printk(KERN_WARNING
103 "Request IMDMA failed due to ANOMALY 05000182\n");
104 return -EFAULT;
105 }
106 }
107#endif
108
109 if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
110 pr_debug("DMA CHANNEL IN USE\n");
111 return -EBUSY;
112 }
113
114#ifdef CONFIG_BF54x
115 if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
116 unsigned int per_map;
117 per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
118 if (strncmp(device_id, "BFIN_UART", 9) == 0)
119 dma_ch[channel].regs->peripheral_map = per_map |
120 ((channel - CH_UART2_RX + 0xC)<<12);
121 else
122 dma_ch[channel].regs->peripheral_map = per_map |
123 ((channel - CH_UART2_RX + 0x6)<<12);
124 }
125#endif
126
127 dma_ch[channel].device_id = device_id;
128 dma_ch[channel].irq = 0;
129
130
131
132
133
134 pr_debug("request_dma() : END\n");
135 return 0;
136}
137EXPORT_SYMBOL(request_dma);
138
139int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
140{
141 int ret;
142 unsigned int irq;
143
144 BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
145 !atomic_read(&dma_ch[channel].chan_status));
146
147 irq = channel2irq(channel);
148 ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
149 if (ret)
150 return ret;
151
152 dma_ch[channel].irq = irq;
153 dma_ch[channel].data = data;
154
155 return 0;
156}
157EXPORT_SYMBOL(set_dma_callback);
158
159
160
161
162
163
164
/* Pulse the RESTART bit in the channel config register to flush any
 * stale data out of the channel's internal DMA FIFO. */
static void clear_dma_buffer(unsigned int channel)
{
	dma_ch[channel].regs->cfg |= RESTART;
	SSYNC();	/* ensure the set reaches the controller before we clear it */
	dma_ch[channel].regs->cfg &= ~RESTART;
}
171
/* Release a DMA channel previously obtained via request_dma():
 * stop the hardware, flush its FIFO, free any attached IRQ, then
 * mark the channel available again. */
void free_dma(unsigned int channel)
{
	pr_debug("freedma() : BEGIN\n");
	BUG_ON(channel >= MAX_DMA_CHANNELS ||
			!atomic_read(&dma_ch[channel].chan_status));

	/* Halt the hardware before touching bookkeeping */
	disable_dma(channel);
	clear_dma_buffer(channel);

	/* irq is nonzero only if set_dma_callback() was used */
	if (dma_ch[channel].irq)
		free_irq(dma_ch[channel].irq, dma_ch[channel].data);

	/* Channel is free again; future request_dma() calls may claim it */
	atomic_set(&dma_ch[channel].chan_status, 0);

	pr_debug("freedma() : END\n");
}
EXPORT_SYMBOL(free_dma);
191
192#ifdef CONFIG_PM
193# ifndef MAX_DMA_SUSPEND_CHANNELS
194# define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS
195# endif
/* PM suspend hook: refuse to suspend while any DMA channel is still
 * enabled, and save each channel's peripheral_map (which is lost across
 * suspend on some parts) for restore in blackfin_dma_resume().
 * Returns 0 on success, -EBUSY if a channel is still running. */
int blackfin_dma_suspend(void)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
		if (dma_ch[i].regs->cfg & DMAEN) {
			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
			return -EBUSY;
		}

		/* Only the first MAX_DMA_SUSPEND_CHANNELS have a map to save */
		if (i < MAX_DMA_SUSPEND_CHANNELS)
			dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
	}

	return 0;
}
212
/* PM resume hook: disable every channel and restore the peripheral
 * mappings saved by blackfin_dma_suspend(). */
void blackfin_dma_resume(void)
{
	int i;

	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
		dma_ch[i].regs->cfg = 0;

		if (i < MAX_DMA_SUSPEND_CHANNELS)
			dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
	}
}
224#endif
225
226
227
228
229
230
231
/* Very-early boot setup: make sure both MDMA source streams are idle so
 * early_dma_memcpy() starts from a known-quiescent controller state. */
void __init blackfin_dma_early_init(void)
{
	early_shadow_stamp();
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
}
238
/* Early-boot async memcpy: grabs whichever MDMA stream pair (0 or 1) is
 * free, programs a one-shot 32-bit transfer, and returns WITHOUT waiting
 * for completion -- call early_dma_memcpy_done() to drain.  No locking;
 * assumed to run single-threaded with interrupts off this early in boot.
 * dst, src and size must all be 4-byte aligned. */
void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
{
	unsigned long dst = (unsigned long)pdst;
	unsigned long src = (unsigned long)psrc;
	struct dma_register *dst_ch, *src_ch;

	early_shadow_stamp();

	/* Only 32-bit-aligned transfers are supported here */
	BUG_ON(dst % 4);
	BUG_ON(src % 4);
	BUG_ON(size % 4);

	src_ch = 0;
	/* Alternate between the two MDMA stream pairs until we find one
	 * that is idle (cfg == 0) or has finished (DMA_DONE set). */
	while (1) {
		/* Flip to the other stream pair each iteration */
		if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) {
			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
		} else {
			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
		}

		if (!bfin_read16(&src_ch->cfg))
			break;	/* stream is idle -- take it */
		else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) {
			/* previous transfer done; disable and reuse */
			bfin_write16(&src_ch->cfg, 0);
			break;
		}
	}

	/* Make sure the status reads above have settled before programming */
	__builtin_bfin_ssync();

	/* Destination side: 32-bit words, so count = bytes/4, stride = 4 */
	bfin_write32(&dst_ch->start_addr, dst);
	bfin_write16(&dst_ch->x_count, size >> 2);
	bfin_write16(&dst_ch->x_modify, 1 << 2);
	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);	/* W1C: clear stale flags */

	/* Source side mirrors the destination programming */
	bfin_write32(&src_ch->start_addr, src);
	bfin_write16(&src_ch->x_count, size >> 2);
	bfin_write16(&src_ch->x_modify, 1 << 2);
	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);

	/* Enable source first, then destination (write side, DONE irq flag) */
	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);

	/* Push the enables out to the controller */
	__builtin_bfin_ssync();
}
298
/* Wait for all transfers started by early_dma_memcpy() on either MDMA
 * stream pair to finish, then clear status and fully disable both pairs. */
void __init early_dma_memcpy_done(void)
{
	early_shadow_stamp();

	/* A stream still counts as busy if its source is enabled but the
	 * destination has not raised DMA_DONE yet. */
	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
		continue;

	/* Write-1-to-clear the completion/error flags on both destinations */
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/* Disable everything so the normal-runtime dma_memcpy() machinery
	 * later finds the controller quiescent. */
	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_S1_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
	bfin_write_MDMA_D1_CONFIG(0);

	__builtin_bfin_ssync();
}
321
322
323
324
325
326
327
328
329
/* Program MDMA stream 0 for one transfer and busy-wait for completion.
 *
 * daddr/saddr: destination/source addresses
 * dmod/smod:   signed per-element strides (negative for descending copies);
 *              |dmod| encodes the element size (1/2/4 bytes)
 * cnt:         element count
 * conf:        WDSIZE_* word size, optionally DMA2D for counts > 64K
 *
 * Serialized by a local spinlock; may leave the hardware running on exit
 * (the early-return path below), which the next caller waits out. */
static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf)
{
	static DEFINE_SPINLOCK(mdma_lock);
	unsigned long flags;

	spin_lock_irqsave(&mdma_lock, flags);

	/* Drain pending system writes before inspecting controller state */
	__builtin_bfin_ssync();

	/* A previous caller may have returned while MDMA0 was still busy;
	 * if so, wait for that transfer to complete before reprogramming. */
	if (bfin_read_MDMA_S0_CONFIG())
		while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
			continue;

	if (conf & DMA2D) {
		/* 2D mode: the X counter is 16 bits, so a flat element count
		 * above 64K must be folded into rows.  shift is the log2 of
		 * the element size recovered from |dmod| (1/2/4 -> 0/1/2);
		 * each row holds (64K >> shift) elements and ycnt is the row
		 * count.  Assumes cnt is a multiple of the row length --
		 * callers pass 64K-byte bulk chunks (see dma_memcpy_nocache). */
		u32 shift = abs(dmod) >> 1;
		size_t ycnt = cnt >> (16 - shift);
		cnt = 1 << (16 - shift);
		bfin_write_MDMA_D0_Y_COUNT(ycnt);
		bfin_write_MDMA_S0_Y_COUNT(ycnt);
		bfin_write_MDMA_D0_Y_MODIFY(dmod);
		bfin_write_MDMA_S0_Y_MODIFY(smod);
	}

	bfin_write_MDMA_D0_START_ADDR(daddr);
	bfin_write_MDMA_D0_X_COUNT(cnt);
	bfin_write_MDMA_D0_X_MODIFY(dmod);
	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);	/* W1C stale flags */

	bfin_write_MDMA_S0_START_ADDR(saddr);
	bfin_write_MDMA_S0_X_COUNT(cnt);
	bfin_write_MDMA_S0_X_MODIFY(smod);
	bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	/* Enable source first, then destination (write side) */
	bfin_write_MDMA_S0_CONFIG(DMAEN | conf);
	bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf);

	spin_unlock_irqrestore(&mdma_lock, flags);

	SSYNC();

	/* Busy-wait for DONE; if the source config reads back as zero the
	 * transfer was torn down elsewhere, so bail out early. */
	while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE))
		if (bfin_read_MDMA_S0_CONFIG())
			continue;
		else
			return;

	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);

	bfin_write_MDMA_S0_CONFIG(0);
	bfin_write_MDMA_D0_CONFIG(0);
}
394
395
396
397
398
399
400
401static void *_dma_memcpy(void *pdst, const void *psrc, size_t size)
402{
403 u32 conf, shift;
404 s16 mod;
405 unsigned long dst = (unsigned long)pdst;
406 unsigned long src = (unsigned long)psrc;
407
408 if (size == 0)
409 return NULL;
410
411 if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) {
412 conf = WDSIZE_32;
413 shift = 2;
414 } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) {
415 conf = WDSIZE_16;
416 shift = 1;
417 } else {
418 conf = WDSIZE_8;
419 shift = 0;
420 }
421
422
423
424
425
426 mod = 1 << shift;
427 if (src < dst) {
428 mod *= -1;
429 dst += size + mod;
430 src += size + mod;
431 }
432 size >>= shift;
433
434 if (size > 0x10000)
435 conf |= DMA2D;
436
437 __dma_memcpy(dst, mod, src, mod, size, conf);
438
439 return pdst;
440}
441
442
443
444
445
446
447
448
449void *dma_memcpy(void *pdst, const void *psrc, size_t size)
450{
451 unsigned long dst = (unsigned long)pdst;
452 unsigned long src = (unsigned long)psrc;
453
454 if (bfin_addr_dcacheable(src))
455 blackfin_dcache_flush_range(src, src + size);
456
457 if (bfin_addr_dcacheable(dst))
458 blackfin_dcache_invalidate_range(dst, dst + size);
459
460 return dma_memcpy_nocache(pdst, psrc, size);
461}
462EXPORT_SYMBOL(dma_memcpy);
463
464
465
466
467
468
469
470
471
472void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size)
473{
474 size_t bulk, rest;
475
476 bulk = size & ~0xffff;
477 rest = size - bulk;
478 if (bulk)
479 _dma_memcpy(pdst, psrc, bulk);
480 _dma_memcpy(pdst + bulk, psrc + bulk, rest);
481 return pdst;
482}
483EXPORT_SYMBOL(dma_memcpy_nocache);
484
485
486
487
488
489
490void *safe_dma_memcpy(void *dst, const void *src, size_t size)
491{
492 if (!access_ok(VERIFY_WRITE, dst, size))
493 return NULL;
494 if (!access_ok(VERIFY_READ, src, size))
495 return NULL;
496 return dma_memcpy(dst, src, size);
497}
498EXPORT_SYMBOL(safe_dma_memcpy);
499
/* Write 'len' elements of 'size' bytes from buffer 'buf' to the fixed
 * I/O address 'addr' (dest stride 0 keeps the address constant).  The
 * buffer is flushed from D-cache first so the DMA engine reads fresh data. */
static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len,
		     u16 size, u16 dma_size)
{
	blackfin_dcache_flush_range(buf, buf + len * size);
	__dma_memcpy(addr, 0, buf, size, len, dma_size);
}
506
/* Read 'len' elements of 'size' bytes from the fixed I/O address 'addr'
 * into buffer 'buf' (source stride 0).  The buffer's cache lines are
 * invalidated first so the CPU sees the DMA-written data afterwards. */
static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len,
		    u16 size, u16 dma_size)
{
	blackfin_dcache_invalidate_range(buf, buf + len * size);
	__dma_memcpy(buf, size, addr, 0, len, dma_size);
}
513
/* Generate and export the dma_{in,out}s{b,w,l}() string-I/O helpers:
 * io = in/out, bwl = byte/word/long suffix, isize = element bytes,
 * dmasize = WDSIZE_* suffix, cnst = 'const' for the out variants. */
#define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \
void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \
{ \
	_dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \
} \
EXPORT_SYMBOL(dma_##io##s##bwl)
MAKE_DMA_IO(out, b, 1,  8, const);
MAKE_DMA_IO(in,  b, 1,  8, );
MAKE_DMA_IO(out, w, 2, 16, const);
MAKE_DMA_IO(in,  w, 2, 16, );
MAKE_DMA_IO(out, l, 4, 32, const);
MAKE_DMA_IO(in,  l, 4, 32, );
526