1
2
3
4
5
6
7
8
9
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/export.h>
14#include <linux/errno.h>
15#include <linux/mm.h>
16#include <linux/memblock.h>
17#include <linux/spinlock.h>
18#include <linux/gfp.h>
19#include <linux/dma-direct.h>
20#include <linux/dma-noncoherent.h>
21#include <asm/mipsregs.h>
22#include <asm/jazz.h>
23#include <asm/io.h>
24#include <linux/uaccess.h>
25#include <asm/dma.h>
26#include <asm/jazzdma.h>
27
28
29
30
/* Set to 1 to compile in verbose VDMA debugging (levels set via debuglvl). */
#define CONF_DEBUG_VDMA 0

/* The R4030 DMA translation table; set up once in vdma_init(). */
static VDMA_PGTBL_ENTRY *pgtbl;

/* Protects pgtbl owner/frame fields (taken by vdma_alloc()). */
static DEFINE_SPINLOCK(vdma_lock);

/*
 * Effective debug level: constant-folds to 0 (and lets the compiler drop
 * all debug printks) unless CONF_DEBUG_VDMA is enabled.
 */
#define vdma_debug ((CONF_DEBUG_VDMA) ? debuglvl : 0)

/* Verbosity used when debugging is compiled in. */
static int debuglvl = 3;
49
50static inline void vdma_pgtbl_init(void)
51{
52 unsigned long paddr = 0;
53 int i;
54
55 for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
56 pgtbl[i].frame = paddr;
57 pgtbl[i].owner = VDMA_PAGE_EMPTY;
58 paddr += VDMA_PAGESIZE;
59 }
60}
61
62
63
64
/*
 * Allocate the R4030 DMA translation table, point the chipset at it and
 * mark every entry free.  Runs once at arch_initcall time.
 */
static int __init vdma_init(void)
{
	/*
	 * Allocate page-aligned memory for the translation table out of
	 * the GFP_DMA zone.
	 */
	pgtbl = (VDMA_PGTBL_ENTRY *)__get_free_pages(GFP_KERNEL | GFP_DMA,
						     get_order(VDMA_PGTBL_SIZE));
	BUG_ON(!pgtbl);
	/* Flush the freshly allocated table out of the CPU caches ... */
	dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
	/*
	 * ... then access it through KSEG1 so that later table updates
	 * bypass the cache and are immediately visible to the chipset.
	 */
	pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);

	/* Mark all entries empty with identity frame mappings. */
	vdma_pgtbl_init();

	/* Install base/limit in the R4030 and reset its translation state. */
	r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
			  CPHYSADDR((unsigned long)pgtbl));
	r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
	r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);

	printk(KERN_INFO "VDMA: R4030 DMA pagetables initialized.\n");
	return 0;
}
arch_initcall(vdma_init);
92
93
94
95
96unsigned long vdma_alloc(unsigned long paddr, unsigned long size)
97{
98 int first, last, pages, frame, i;
99 unsigned long laddr, flags;
100
101
102
103 if (paddr > 0x1fffffff) {
104 if (vdma_debug)
105 printk("vdma_alloc: Invalid physical address: %08lx\n",
106 paddr);
107 return DMA_MAPPING_ERROR;
108 }
109 if (size > 0x400000 || size == 0) {
110 if (vdma_debug)
111 printk("vdma_alloc: Invalid size: %08lx\n", size);
112 return DMA_MAPPING_ERROR;
113 }
114
115 spin_lock_irqsave(&vdma_lock, flags);
116
117
118
119 pages = VDMA_PAGE(paddr + size) - VDMA_PAGE(paddr) + 1;
120 first = 0;
121 while (1) {
122 while (pgtbl[first].owner != VDMA_PAGE_EMPTY &&
123 first < VDMA_PGTBL_ENTRIES) first++;
124 if (first + pages > VDMA_PGTBL_ENTRIES) {
125 spin_unlock_irqrestore(&vdma_lock, flags);
126 return DMA_MAPPING_ERROR;
127 }
128
129 last = first + 1;
130 while (pgtbl[last].owner == VDMA_PAGE_EMPTY
131 && last - first < pages)
132 last++;
133
134 if (last - first == pages)
135 break;
136 first = last + 1;
137 }
138
139
140
141
142 laddr = (first << 12) + (paddr & (VDMA_PAGESIZE - 1));
143 frame = paddr & ~(VDMA_PAGESIZE - 1);
144
145 for (i = first; i < last; i++) {
146 pgtbl[i].frame = frame;
147 pgtbl[i].owner = laddr;
148 frame += VDMA_PAGESIZE;
149 }
150
151
152
153
154 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
155
156 if (vdma_debug > 1)
157 printk("vdma_alloc: Allocated %d pages starting from %08lx\n",
158 pages, laddr);
159
160 if (vdma_debug > 2) {
161 printk("LADDR: ");
162 for (i = first; i < last; i++)
163 printk("%08x ", i << 12);
164 printk("\nPADDR: ");
165 for (i = first; i < last; i++)
166 printk("%08x ", pgtbl[i].frame);
167 printk("\nOWNER: ");
168 for (i = first; i < last; i++)
169 printk("%08x ", pgtbl[i].owner);
170 printk("\n");
171 }
172
173 spin_unlock_irqrestore(&vdma_lock, flags);
174
175 return laddr;
176}
177
178EXPORT_SYMBOL(vdma_alloc);
179
180
181
182
183
184
185int vdma_free(unsigned long laddr)
186{
187 int i;
188
189 i = laddr >> 12;
190
191 if (pgtbl[i].owner != laddr) {
192 printk
193 ("vdma_free: trying to free other's dma pages, laddr=%8lx\n",
194 laddr);
195 return -1;
196 }
197
198 while (i < VDMA_PGTBL_ENTRIES && pgtbl[i].owner == laddr) {
199 pgtbl[i].owner = VDMA_PAGE_EMPTY;
200 i++;
201 }
202
203 if (vdma_debug > 1)
204 printk("vdma_free: freed %ld pages starting from %08lx\n",
205 i - (laddr >> 12), laddr);
206
207 return 0;
208}
209
210EXPORT_SYMBOL(vdma_free);
211
212
213
214
215
216int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size)
217{
218 int first, pages;
219
220 if (laddr > 0xffffff) {
221 if (vdma_debug)
222 printk
223 ("vdma_map: Invalid logical address: %08lx\n",
224 laddr);
225 return -EINVAL;
226 }
227 if (paddr > 0x1fffffff) {
228 if (vdma_debug)
229 printk
230 ("vdma_map: Invalid physical address: %08lx\n",
231 paddr);
232 return -EINVAL;
233 }
234
235 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
236 first = laddr >> 12;
237 if (vdma_debug)
238 printk("vdma_remap: first=%x, pages=%x\n", first, pages);
239 if (first + pages > VDMA_PGTBL_ENTRIES) {
240 if (vdma_debug)
241 printk("vdma_alloc: Invalid size: %08lx\n", size);
242 return -EINVAL;
243 }
244
245 paddr &= ~(VDMA_PAGESIZE - 1);
246 while (pages > 0 && first < VDMA_PGTBL_ENTRIES) {
247 if (pgtbl[first].owner != laddr) {
248 if (vdma_debug)
249 printk("Trying to remap other's pages.\n");
250 return -EPERM;
251 }
252 pgtbl[first].frame = paddr;
253 paddr += VDMA_PAGESIZE;
254 first++;
255 pages--;
256 }
257
258
259
260
261 r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
262
263 if (vdma_debug > 2) {
264 int i;
265 pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1;
266 first = laddr >> 12;
267 printk("LADDR: ");
268 for (i = first; i < first + pages; i++)
269 printk("%08x ", i << 12);
270 printk("\nPADDR: ");
271 for (i = first; i < first + pages; i++)
272 printk("%08x ", pgtbl[i].frame);
273 printk("\nOWNER: ");
274 for (i = first; i < first + pages; i++)
275 printk("%08x ", pgtbl[i].owner);
276 printk("\n");
277 }
278
279 return 0;
280}
281
282
283
284
285
286
287unsigned long vdma_phys2log(unsigned long paddr)
288{
289 int i;
290 int frame;
291
292 frame = paddr & ~(VDMA_PAGESIZE - 1);
293
294 for (i = 0; i < VDMA_PGTBL_ENTRIES; i++) {
295 if (pgtbl[i].frame == frame)
296 break;
297 }
298
299 if (i == VDMA_PGTBL_ENTRIES)
300 return ~0UL;
301
302 return (i << 12) + (paddr & (VDMA_PAGESIZE - 1));
303}
304
305EXPORT_SYMBOL(vdma_phys2log);
306
307
308
309
310unsigned long vdma_log2phys(unsigned long laddr)
311{
312 return pgtbl[laddr >> 12].frame + (laddr & (VDMA_PAGESIZE - 1));
313}
314
315EXPORT_SYMBOL(vdma_log2phys);
316
317
318
319
/*
 * Dump the R4030 DMA state to the console: global configuration,
 * translation table registers, latched fault addresses, interrupt
 * source and the mode/enable registers of all eight channels.
 */
void vdma_stats(void)
{
	int i;

	printk("vdma_stats: CONFIG: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_CONFIG));
	printk("R4030 translation table base: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_BASE));
	printk("R4030 translation table limit: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_TRSTBL_LIM));
	printk("vdma_stats: INV_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_INV_ADDR));
	printk("vdma_stats: R_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_R_FAIL_ADDR));
	printk("vdma_stats: M_FAIL_ADDR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_M_FAIL_ADDR));
	printk("vdma_stats: IRQ_SOURCE: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_IRQ_SOURCE));
	printk("vdma_stats: I386_ERROR: %08x\n",
	       r4030_read_reg32(JAZZ_R4030_I386_ERROR));
	/* Per-channel registers live 0x20 apart, hence (i << 5). */
	printk("vdma_chnl_modes: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (i << 5)));
	printk("\n");
	printk("vdma_chnl_enables: ");
	for (i = 0; i < 8; i++)
		printk("%04x ",
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (i << 5)));
	printk("\n");
}
353
354
355
356
357
358
359
360
/*
 * Enable a R4030 DMA channel: report any latched address/memory errors,
 * set the channel's interrupt-related bits, then set the enable bit.
 */
void vdma_enable(int channel)
{
	int status;

	if (vdma_debug)
		printk("vdma_enable: channel %d\n", channel);

	/*
	 * Check error conditions first
	 */
	status = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
	if (status & 0x400)
		printk("VDMA: Channel %d: Address error!\n", channel);
	if (status & 0x200)
		printk("VDMA: Channel %d: Memory error!\n", channel);

	/*
	 * Set the TC/memory/address interrupt bits -- presumably this
	 * acknowledges any pending interrupts before the channel is
	 * (re)enabled; TODO confirm against R4030 documentation.
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) | R4030_TC_INTR
			  | R4030_MEM_INTR | R4030_ADDR_INTR);

	/*
	 * Enable the desired channel
	 */
	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) |
			  R4030_CHNL_ENABLE);
}

EXPORT_SYMBOL(vdma_enable);
395
396
397
398
/*
 * Disable a R4030 DMA channel by clearing its enable bit.  With
 * debugging compiled in, the channel's status/mode/address/count
 * registers are dumped first.
 */
void vdma_disable(int channel)
{
	if (vdma_debug) {
		int status =
		    r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
				     (channel << 5));

		printk("vdma_disable: channel %d\n", channel);
		printk("VDMA: channel %d status: %04x (%s) mode: "
		       "%02x addr: %06x count: %06x\n",
		       channel, status,
		       ((status & 0x600) ? "ERROR" : "OK"),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_MODE +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_ADDR +
						   (channel << 5)),
		       (unsigned) r4030_read_reg32(JAZZ_R4030_CHNL_COUNT +
						   (channel << 5)));
	}

	r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
			  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
					   (channel << 5)) &
			  ~R4030_CHNL_ENABLE);

	/*
	 * Dummy read from the remote bus -- presumably this flushes the
	 * preceding uncached write before returning; TODO confirm against
	 * Jazz chipset documentation.
	 */
	*((volatile unsigned int *) JAZZ_DUMMY_DEVICE);
}

EXPORT_SYMBOL(vdma_disable);
432
433
434
435
436
437
438
439
440
441
/*
 * Set the transfer mode for a R4030 DMA channel.  @channel selects the
 * device-specific bus width/access-time programming; @mode
 * (DMA_MODE_READ or DMA_MODE_WRITE) sets the direction bit in the
 * channel's enable register.
 */
void vdma_set_mode(int channel, int mode)
{
	if (vdma_debug)
		printk("vdma_set_mode: channel %d, mode 0x%x\n", channel,
		       mode);

	switch (channel) {
	case JAZZ_SCSI_DMA:
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
				  /* SCSI: 16 bit width, 80ns access time */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_16 |
				  R4030_MODE_ATIME_80);
		break;

	case JAZZ_FLOPPY_DMA:
		r4030_write_reg32(JAZZ_R4030_CHNL_MODE + (channel << 5),
				  /* floppy: 8 bit width, 120ns access time */
				  R4030_MODE_INTR_EN |
				  R4030_MODE_WIDTH_8 |
				  R4030_MODE_ATIME_120);
		break;

	case JAZZ_AUDIOL_DMA:
	case JAZZ_AUDIOR_DMA:
		printk("VDMA: Audio DMA not supported yet.\n");
		break;

	default:
		printk
		    ("VDMA: vdma_set_mode() called with unsupported channel %d!\n",
		     channel);
	}

	/* Direction: READ clears, WRITE sets the channel's WRITE bit. */
	switch (mode) {
	case DMA_MODE_READ:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) &
				  ~R4030_CHNL_WRITE);
		break;

	case DMA_MODE_WRITE:
		r4030_write_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5),
				  r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE +
						   (channel << 5)) |
				  R4030_CHNL_WRITE);
		break;

	default:
		printk
		    ("VDMA: vdma_set_mode() called with unknown dma mode 0x%x\n",
		     mode);
	}
}

EXPORT_SYMBOL(vdma_set_mode);
501
502
503
504
505void vdma_set_addr(int channel, long addr)
506{
507 if (vdma_debug)
508 printk("vdma_set_addr: channel %d, addr %lx\n", channel,
509 addr);
510
511 r4030_write_reg32(JAZZ_R4030_CHNL_ADDR + (channel << 5), addr);
512}
513
514EXPORT_SYMBOL(vdma_set_addr);
515
516
517
518
519void vdma_set_count(int channel, int count)
520{
521 if (vdma_debug)
522 printk("vdma_set_count: channel %d, count %08x\n", channel,
523 (unsigned) count);
524
525 r4030_write_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5), count);
526}
527
528EXPORT_SYMBOL(vdma_set_count);
529
530
531
532
533int vdma_get_residue(int channel)
534{
535 int residual;
536
537 residual = r4030_read_reg32(JAZZ_R4030_CHNL_COUNT + (channel << 5));
538
539 if (vdma_debug)
540 printk("vdma_get_residual: channel %d: residual=%d\n",
541 channel, residual);
542
543 return residual;
544}
545
546
547
548
549int vdma_get_enable(int channel)
550{
551 int enable;
552
553 enable = r4030_read_reg32(JAZZ_R4030_CHNL_ENABLE + (channel << 5));
554
555 if (vdma_debug)
556 printk("vdma_get_enable: channel %d: enable=%d\n", channel,
557 enable);
558
559 return enable;
560}
561
/*
 * dma_map_ops .alloc: get memory from dma-direct, then replace the
 * direct-mapped handle with a VDMA logical address obtained from the
 * R4030 translation table.
 */
static void *jazz_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;

	ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
	if (!ret)
		return NULL;

	*dma_handle = vdma_alloc(virt_to_phys(ret), size);
	if (*dma_handle == DMA_MAPPING_ERROR) {
		/*
		 * NOTE(review): *dma_handle is DMA_MAPPING_ERROR at this
		 * point, so dma_direct_free_pages() receives a bogus dma
		 * address -- verify it ignores that argument on this path.
		 */
		dma_direct_free_pages(dev, size, ret, *dma_handle, attrs);
		return NULL;
	}

	return ret;
}
579
/*
 * dma_map_ops .free: release the VDMA translation entries first, then
 * return the memory to dma-direct.
 */
static void jazz_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	vdma_free(dma_handle);
	dma_direct_free_pages(dev, size, vaddr, dma_handle, attrs);
}
586
587static dma_addr_t jazz_dma_map_page(struct device *dev, struct page *page,
588 unsigned long offset, size_t size, enum dma_data_direction dir,
589 unsigned long attrs)
590{
591 phys_addr_t phys = page_to_phys(page) + offset;
592
593 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
594 arch_sync_dma_for_device(phys, size, dir);
595 return vdma_alloc(phys, size);
596}
597
598static void jazz_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
599 size_t size, enum dma_data_direction dir, unsigned long attrs)
600{
601 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
602 arch_sync_dma_for_cpu(vdma_log2phys(dma_addr), size, dir);
603 vdma_free(dma_addr);
604}
605
606static int jazz_dma_map_sg(struct device *dev, struct scatterlist *sglist,
607 int nents, enum dma_data_direction dir, unsigned long attrs)
608{
609 int i;
610 struct scatterlist *sg;
611
612 for_each_sg(sglist, sg, nents, i) {
613 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
614 arch_sync_dma_for_device(sg_phys(sg), sg->length,
615 dir);
616 sg->dma_address = vdma_alloc(sg_phys(sg), sg->length);
617 if (sg->dma_address == DMA_MAPPING_ERROR)
618 return 0;
619 sg_dma_len(sg) = sg->length;
620 }
621
622 return nents;
623}
624
625static void jazz_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
626 int nents, enum dma_data_direction dir, unsigned long attrs)
627{
628 int i;
629 struct scatterlist *sg;
630
631 for_each_sg(sglist, sg, nents, i) {
632 if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
633 arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
634 vdma_free(sg->dma_address);
635 }
636}
637
/*
 * Translate the VDMA logical address back to physical and do the
 * device-direction cache maintenance for it.
 */
static void jazz_dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_device(vdma_log2phys(addr), size, dir);
}
643
/*
 * Translate the VDMA logical address back to physical and do the
 * CPU-direction cache maintenance for it.
 */
static void jazz_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	arch_sync_dma_for_cpu(vdma_log2phys(addr), size, dir);
}
649
/*
 * Device-direction cache maintenance for every scatterlist element.
 * Note this uses sg_phys() directly, not the VDMA translation.
 */
static void jazz_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
659
/*
 * CPU-direction cache maintenance for every scatterlist element.
 * Note this uses sg_phys() directly, not the VDMA translation.
 */
static void jazz_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
669
/*
 * DMA operations for the Jazz family: dma-direct allocation combined
 * with R4030 pagetable-based address translation (vdma_alloc/vdma_free).
 */
const struct dma_map_ops jazz_dma_ops = {
	.alloc			= jazz_dma_alloc,
	.free			= jazz_dma_free,
	.map_page		= jazz_dma_map_page,
	.unmap_page		= jazz_dma_unmap_page,
	.map_sg			= jazz_dma_map_sg,
	.unmap_sg		= jazz_dma_unmap_sg,
	.sync_single_for_cpu	= jazz_dma_sync_single_for_cpu,
	.sync_single_for_device	= jazz_dma_sync_single_for_device,
	.sync_sg_for_cpu	= jazz_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= jazz_dma_sync_sg_for_device,
	.dma_supported		= dma_direct_supported,
	.cache_sync		= arch_dma_cache_sync,
	.mmap			= dma_common_mmap,
	.get_sgtable		= dma_common_get_sgtable,
};
EXPORT_SYMBOL(jazz_dma_ops);
687