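/*
 * DMA (non-)coherent memory support for ARC
 *
 * I/O is not cache coherent on ARC, so a "coherent" DMA buffer is normally
 * accessed by the kernel through an uncached (ioremap_nocache) mapping.
 * On ARCv2 parts with the hardware I/O Coherency block (IOC) enabled,
 * coherent buffers are left in normal cached memory instead.
 */
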
#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
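	/*
	 * No cache maintenance (and hence a plain cached mapping) is needed
	 * when the ARCv2 hardware I/O Coherency block (IOC) is enabled, or
	 * when the caller explicitly asked for a non-consistent buffer with
	 * DMA_ATTR_NON_CONSISTENT.
	 */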
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;
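	/*
	 * An explicit kernel virtual mapping is needed either to enforce
	 * non-cachability (coherent buffer) or because a highmem page has
	 * no linear kernel mapping to begin with.
	 */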
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;
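	/* Physical address of the backing page doubles as the DMA handle */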
	paddr = page_to_phys(page);

	*dma_handle = paddr;
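	/* Kernel virtual address: an uncached remap, or the identity-mapped paddr */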
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}
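	/*
	 * Write back and invalidate any existing cache lines for the backing
	 * page (it may have been used earlier as ordinary cached memory) so
	 * that the uncached view and memory are consistent from the start.
	 */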
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = dma_handle;
	struct page *page = virt_to_page(paddr);
	int is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			 (is_isa_arcv2() && ioc_enable);

	/* Undo the uncached remap done at alloc time for coherent/highmem buffers */
	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

int arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(dma_addr);
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	/* Map the buffer into userspace uncached as well */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}
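/*
 * Cache maintenance done here, per DMA direction:
 *
 *           | for_device (dma_map_*)   | for_cpu (dma_unmap_*)
 * ----------+--------------------------+----------------------
 * TO_DEV    | writeback                | none
 * FROM_DEV  | invalidate               | invalidate
 * BIDIR     | writeback + invalidate   | invalidate
 *
 * The direction argument is validated by the generic dma-mapping layer,
 * so unknown values are simply ignored here.
 */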
void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* Discard lines the CPU may have speculatively fetched during DMA */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}