/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

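/*
 * Translate a device DMA address back to the CPU virtual address of the
 * underlying buffer, via the platform's DMA-to-physical hook.  This is
 * only meaningful for lowmem buffers that live in the linear mapping,
 * which is what phys_to_virt() assumes.
 */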
static inline unsigned long dma_addr_to_virt(struct device *dev,
	dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
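
/*
 * The R10000 and R12000 execute speculatively and can refill cache
 * lines while a DMA transfer is still in flight.  On these CPUs a
 * noncoherent buffer therefore needs cache maintenance again when
 * ownership passes back to the CPU (unmap / sync_for_cpu), not only
 * when it is handed to the device.
 */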
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

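/*
 * Allocate cached memory for DMA - "noncoherent" in the Linux sense.
 * The caller is responsible for cache maintenance around device
 * accesses, typically via dma_cache_sync().
 */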
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

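/*
 * Allocate "coherent" memory in the Linux sense: on a noncoherent
 * platform the freshly allocated pages are flushed from the caches and
 * the returned pointer is converted with UNCAC_ADDR() to the uncached
 * window (KSEG1 for a KSEG0 address), so CPU accesses bypass the caches
 * entirely.
 *
 * Hypothetical driver usage (all names here are illustrative, not part
 * of this file):
 *
 *	void *cpu_addr;
 *	dma_addr_t bus_addr;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
 *				      &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, bus_addr);
 */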
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

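/*
 * On a noncoherent platform dma_alloc_coherent() handed out an uncached
 * (KSEG1) alias, so convert back with CAC_ADDR() to recover the page's
 * linear-map address before handing it to free_pages().
 */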
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

EXPORT_SYMBOL(dma_free_coherent);

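/*
 * Perform the minimal cache maintenance for the transfer direction:
 *
 *	DMA_TO_DEVICE:      write back, so the device reads current data
 *	DMA_FROM_DEVICE:    invalidate, so the CPU rereads from memory
 *	DMA_BIDIRECTIONAL:  write back and invalidate
 */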
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

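/*
 * Map a single buffer for DMA.  On noncoherent hardware the buffer is
 * written back and/or invalidated as required before the bus address is
 * returned, so the device never sees stale cache contents.
 */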
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

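/*
 * Map each scatterlist entry individually; the per-entry cache
 * maintenance mirrors dma_map_single().  Returns the number of entries
 * mapped, which is always nents since nothing on this path can fail.
 */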
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

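/*
 * Page-based variant of dma_map_single().  Note that page_address() is
 * only valid for lowmem pages, so this implementation appears to assume
 * the page is not in highmem whenever cache maintenance is needed.
 */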
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		__dma_sync(addr, size, direction);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

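/*
 * Tear down a scatterlist mapping.  For DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL the caches are synced again here, since the device
 * may have written to memory behind lines the CPU still holds;
 * DMA_TO_DEVICE buffers need no further maintenance.
 */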
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

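/*
 * The dma_sync_*_for_cpu() / dma_sync_*_for_device() pairs transfer
 * ownership of a mapped buffer between CPU and device without
 * remapping it.  Only the speculating R10000/R12000 need cache work on
 * the for_cpu side; every noncoherent platform needs it on the
 * for_device side.
 */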
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dev, dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

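/*
 * Explicit cache maintenance for memory obtained from
 * dma_alloc_noncoherent().  Drivers call this around device accesses
 * instead of relying on an uncached mapping.
 */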
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync((unsigned long)vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);