1#ifndef ASMARM_DMA_MAPPING_H
2#define ASMARM_DMA_MAPPING_H
3
4#ifdef __KERNEL__
5
6#include <linux/mm_types.h>
7#include <linux/scatterlist.h>
8#include <linux/dma-debug.h>
9
10#include <asm-generic/dma-coherent.h>
11#include <asm/memory.h>
12
13#ifdef __arch_page_to_dma
14#error Please update to __arch_pfn_to_dma
15#endif
16
17
18
19
20
21
#ifndef __arch_pfn_to_dma
/*
 * Default PFN <-> DMA address translations: the DMA address is simply the
 * bus address, derived via the generic __pfn_to_bus()/__bus_to_pfn() and
 * __virt_to_bus()/__bus_to_virt() helpers.  The 'dev' argument is unused
 * here but kept so platforms with per-device translations (below) share
 * the same call signature.
 */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
/*
 * Platform-provided translations: the machine/platform code defines the
 * __arch_* macros (keyed off __arch_pfn_to_dma above) to implement
 * device-specific address translation.
 */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
79 enum dma_data_direction dir)
80{
81 extern void ___dma_single_cpu_to_dev(const void *, size_t,
82 enum dma_data_direction);
83
84 if (!arch_is_coherent())
85 ___dma_single_cpu_to_dev(kaddr, size, dir);
86}
87
88static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
89 enum dma_data_direction dir)
90{
91 extern void ___dma_single_dev_to_cpu(const void *, size_t,
92 enum dma_data_direction);
93
94 if (!arch_is_coherent())
95 ___dma_single_dev_to_cpu(kaddr, size, dir);
96}
97
98static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
99 size_t size, enum dma_data_direction dir)
100{
101 extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
102 size_t, enum dma_data_direction);
103
104 if (!arch_is_coherent())
105 ___dma_page_cpu_to_dev(page, off, size, dir);
106}
107
108static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
109 size_t size, enum dma_data_direction dir)
110{
111 extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
112 size_t, enum dma_data_direction);
113
114 if (!arch_is_coherent())
115 ___dma_page_dev_to_cpu(page, off, size, dir);
116}
117
118extern int dma_supported(struct device *, u64);
119extern int dma_set_mask(struct device *, u64);
120
121
122
123
124static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
125{
126 return dma_addr == ~0;
127}
128
129
130
131
132
/*
 * Non-coherent allocations are not provided by this implementation:
 * always fail by returning NULL.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}
138
/*
 * Counterpart to dma_alloc_noncoherent() above; since that never
 * allocates anything, there is nothing to free here.
 */
static inline void dma_free_noncoherent(struct device *dev, size_t size,
	void *cpu_addr, dma_addr_t handle)
{
}
143
144
145
146
147
148
149
150
151
152
153
154
155extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
171
172
173
174
175
176
177
178
179
180
181
182
183
184int dma_mmap_coherent(struct device *, struct vm_area_struct *,
185 void *, dma_addr_t, size_t);
186
187
188
189
190
191
192
193
194
195
196
197
198
199extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
200 gfp_t);
201
/* Writecombine memory is released through the ordinary coherent path. */
#define dma_free_writecombine(dev, size, cpu_addr, handle) \
	dma_free_coherent(dev, size, cpu_addr, handle)
204
205int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
206 void *, dma_addr_t, size_t);
207
208
209
210
211
212
213extern void __init init_consistent_dma_size(unsigned long size);
214
215
216#ifdef CONFIG_DMABOUNCE
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241extern int dmabounce_register_dev(struct device *, unsigned long,
242 unsigned long, int (*)(struct device *, dma_addr_t, size_t));
243
244
245
246
247
248
249
250
251
252
253
254extern void dmabounce_unregister_dev(struct device *);
255
256
257
258
259extern dma_addr_t __dma_map_page(struct device *, struct page *,
260 unsigned long, size_t, enum dma_data_direction);
261extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
262 enum dma_data_direction);
263
264
265
266
267int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
268 size_t, enum dma_data_direction);
269int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
270 size_t, enum dma_data_direction);
271#else
/*
 * !CONFIG_DMABOUNCE stub: no bounce buffers in use, so always return 1
 * to tell the caller to proceed with the normal cache maintenance.
 */
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}
277
/*
 * !CONFIG_DMABOUNCE stub: as above, 1 means "continue with the normal
 * sync path".
 */
static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}
283
284
285static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
286 unsigned long offset, size_t size, enum dma_data_direction dir)
287{
288 __dma_page_cpu_to_dev(page, offset, size, dir);
289 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
290}
291
292static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
293 size_t size, enum dma_data_direction dir)
294{
295 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
296 handle & ~PAGE_MASK, size, dir);
297}
298#endif
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
315 size_t size, enum dma_data_direction dir)
316{
317 unsigned long offset;
318 struct page *page;
319 dma_addr_t addr;
320
321 BUG_ON(!virt_addr_valid(cpu_addr));
322 BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
323 BUG_ON(!valid_dma_direction(dir));
324
325 page = virt_to_page(cpu_addr);
326 offset = (unsigned long)cpu_addr & ~PAGE_MASK;
327 addr = __dma_map_page(dev, page, offset, size, dir);
328 debug_dma_map_page(dev, page, offset, size, dir, addr, true);
329
330 return addr;
331}
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
348 unsigned long offset, size_t size, enum dma_data_direction dir)
349{
350 dma_addr_t addr;
351
352 BUG_ON(!valid_dma_direction(dir));
353
354 addr = __dma_map_page(dev, page, offset, size, dir);
355 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
356
357 return addr;
358}
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
/*
 * Unmap a buffer previously mapped with dma_map_single().  The handle,
 * size and direction must match the original mapping.  Notifies
 * dma-debug (map_single == true), then releases the mapping.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
/*
 * Unmap a page previously mapped with dma_map_page().  The handle, size
 * and direction must match the original mapping.  Notifies dma-debug
 * (map_single == false), then releases the mapping.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420static inline void dma_sync_single_range_for_cpu(struct device *dev,
421 dma_addr_t handle, unsigned long offset, size_t size,
422 enum dma_data_direction dir)
423{
424 BUG_ON(!valid_dma_direction(dir));
425
426 debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
427
428 if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
429 return;
430
431 __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
432}
433
434static inline void dma_sync_single_range_for_device(struct device *dev,
435 dma_addr_t handle, unsigned long offset, size_t size,
436 enum dma_data_direction dir)
437{
438 BUG_ON(!valid_dma_direction(dir));
439
440 debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
441
442 if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
443 return;
444
445 __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
446}
447
/*
 * Whole-buffer convenience wrapper: sync the entire mapping for CPU
 * access by delegating to the ranged variant with offset 0.
 */
static inline void dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}
453
/*
 * Whole-buffer convenience wrapper: sync the entire mapping for device
 * access by delegating to the ranged variant with offset 0.
 */
static inline void dma_sync_single_for_device(struct device *dev,
	dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
459
460
461
462
463extern int dma_map_sg(struct device *, struct scatterlist *, int,
464 enum dma_data_direction);
465extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
466 enum dma_data_direction);
467extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
468 enum dma_data_direction);
469extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
470 enum dma_data_direction);
471
472
473#endif
474#endif
475