#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

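/*
 * Make a non-coherent buffer visible to the device before DMA: write back
 * and/or invalidate the kernel's cached (direct-mapped) alias of the
 * physical range, depending on the transfer direction.
 */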
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * Only a writeback is needed here, but the Nios II flush
		 * instruction both writes back and invalidates the line,
		 * so fall through and share the DMA_BIDIRECTIONAL path.
		 */
	case DMA_BIDIRECTIONAL:
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

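/*
 * Make the buffer visible to the CPU after DMA: invalidate any cache lines
 * covering the range so the CPU cannot read stale data the device has just
 * overwritten.  Nothing is needed for a DMA_TO_DEVICE transfer.
 */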
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

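/*
 * Write back and invalidate the cached alias of a page that is about to be
 * used through an uncached mapping, so no dirty lines are later evicted on
 * top of data written by the device.
 */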
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long start = (unsigned long)page_address(page);

	flush_dcache_range(start, start + size);
}

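/*
 * Return an uncached alias of a kernel virtual address by OR-ing in the
 * configured I/O region base, where data accesses bypass the cache.
 */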
void *arch_dma_set_uncached(void *ptr, size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	addr |= CONFIG_NIOS2_IO_REGION_BASE;

	return (void *)addr;
}