#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * The architecture-private pfn_to_dma()/dma_to_pfn() and
 * dma_to_virt()/virt_to_dma() helpers below are used internally by the
 * DMA-mapping API to convert between CPU and device (bus) addresses.
 * They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
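
/*
 * Illustrative sketch only (not part of this header): a platform that
 * needs a fixed offset between physical and bus addresses can provide
 * the __arch_* hooks above from its platform headers before this file
 * is included.  "EXAMPLE_DMA_OFFSET" below is a hypothetical constant.
 *
 *	#define __arch_pfn_to_dma(dev, pfn)				\
 *		((dma_addr_t)__pfn_to_phys(pfn) - EXAMPLE_DMA_OFFSET)
 *	#define __arch_dma_to_pfn(dev, addr)				\
 *		__phys_to_pfn((addr) + EXAMPLE_DMA_OFFSET)
 *	#define __arch_dma_to_virt(dev, addr)				\
 *		((void *)__phys_to_virt((addr) + EXAMPLE_DMA_OFFSET))
 *	#define __arch_virt_to_dma(dev, addr)				\
 *		((dma_addr_t)__virt_to_phys((unsigned long)(addr)) - EXAMPLE_DMA_OFFSET)
 */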

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
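
/*
 * Illustrative sketch only (not part of this header): every streaming
 * mapping should be checked with dma_mapping_error() before the handle
 * is handed to the hardware.  'dev', 'buf' and 'len' are hypothetical
 * driver-owned variables.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */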

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
	void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
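
/*
 * Illustrative sketch only (not part of this header): a typical coherent
 * allocation for a descriptor ring.  'ring_bytes' and the 'priv'
 * structure are hypothetical.
 *
 *	priv->desc = dma_alloc_coherent(dev, ring_bytes,
 *					&priv->desc_dma, GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_bytes, priv->desc, priv->desc_dma);
 */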

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
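
/*
 * Illustrative sketch only (not part of this header): exporting a
 * coherent buffer to user space from a driver's mmap() handler.  The
 * 'priv' fields are hypothetical.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(priv->dev, vma, priv->desc,
 *					 priv->desc_dma, priv->desc_size);
 *	}
 */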

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
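
/*
 * Illustrative sketch only (not part of this header): frame buffer style
 * drivers typically use the writecombine variants so CPU writes are
 * buffered but still uncached.  'fb_size' and 'priv' are hypothetical.
 *
 *	priv->fb_virt = dma_alloc_writecombine(dev, fb_size,
 *					       &priv->fb_dma, GFP_KERNEL);
 *	...
 *	dma_mmap_writecombine(dev, vma, priv->fb_virt, priv->fb_dma, fb_size);
 *	...
 *	dma_free_writecombine(dev, fb_size, priv->fb_virt, priv->fb_dma);
 */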

#ifdef CONFIG_DMABOUNCE
/*
 * The DMABOUNCE code provides bounce buffers for devices whose DMA
 * windows cannot reach all of system memory (for example SA-1111 and
 * IXP4xx based systems).  The helpers below are used by the dmabounce
 * subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether a buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
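
/*
 * Illustrative sketch only (not part of this header): platform code
 * registering a device whose DMA window covers only the first 64MB of
 * RAM.  The window limit and pool sizes below are hypothetical.
 *
 *	static int example_needs_bounce(struct device *dev, dma_addr_t addr,
 *					size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 4096, example_needs_bounce);
 */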

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
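
/*
 * Illustrative sketch only (not part of this header): streaming mapping
 * of a kmalloc()ed buffer around a single device transfer.  'dev', 'buf'
 * and 'len' are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer, wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */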

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to not
 * access the buffer.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to not
 * access the buffer.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}
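
/*
 * Illustrative sketch only (not part of this header): mapping a region
 * of a page, for example one fragment of a network buffer.  'page',
 * 'offset' and 'len' are hypothetical.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
 */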

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the buffer
 * using the CPU, yet do not wish to tear down the mapping, you must
 * call this function before doing so.  Before handing the DMA address
 * back to the device, you must first perform a
 * dma_sync_single_range_for_device(), at which point the device again
 * owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
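
/*
 * Illustrative sketch only (not part of this header): inspecting a
 * long-lived streaming mapping between transfers without tearing it
 * down.  Ownership must be passed back with the _for_device variant
 * before the device may touch the buffer again.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU examines the received data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */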

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
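
/*
 * Illustrative sketch only (not part of this header): mapping a small
 * scatterlist and programming each mapped segment into the device.
 * 'example_program_desc()' and the buffers are hypothetical.
 *
 *	struct scatterlist sg[2];
 *	struct scatterlist *s;
 *	int i, nents;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], buf0, len0);
 *	sg_set_buf(&sg[1], buf1, len1);
 *
 *	nents = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	if (!nents)
 *		return -ENOMEM;
 *	for_each_sg(sg, s, nents, i)
 *		example_program_desc(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */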

#endif /* __KERNEL__ */
#endif