/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)

/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)

/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)

/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)

/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)

/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer assuming that it has been already
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)

/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)

/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)

/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU
 * cannot reference a dma_addr_t directly because there may be translation
 * between its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    unsigned long attrs);
	int (*get_sgtable)(struct device *dev, struct sg_table *sgt,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   unsigned long attrs);
	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};

extern const struct dma_map_ops dma_direct_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL
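
/*
 * Example (editor's note, not from the original header): DMA_BIT_MASK(32)
 * evaluates to 0x00000000ffffffffULL.  The n == 64 case is special-cased
 * because evaluating 1ULL << 64 would be undefined behavior in C.
 */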

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the dma allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, size, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation of dma dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_map_single(dev, ptr, size);
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
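
/*
 * Example (editor's sketch, not part of the original header): the usual
 * streaming-DMA pattern built on the wrappers above.  "dev", "buf" and
 * "len" are assumed to come from the calling driver.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with 'handle', run the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */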

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
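
/*
 * Example (editor's sketch, not part of the original header): note the
 * asymmetry when mapping a scatterlist.  The count returned by dma_map_sg()
 * (possibly smaller than nents if entries were coalesced) is what must be
 * walked when programming the device, but the original nents must be passed
 * back to dma_unmap_sg().  "sgl" and "nents" are assumed driver locals.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	for_each_sg(sgl, sg, count, i)
 *		... feed sg_dma_address(sg) / sg_dma_len(sg) to the device ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */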

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
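
/*
 * Example (editor's sketch, not part of the original header): reusing one
 * streaming mapping across several transfers by passing buffer ownership
 * back and forth with the sync calls instead of remapping each time.
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read buf ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */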

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size,
			   unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
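
/*
 * Example (editor's sketch, not part of the original header): exposing a
 * coherent allocation through a driver's mmap file operation.  "my_mmap"
 * is hypothetical; "ring", "ring_dma" and "ring_size" are assumed to be a
 * coherent allocation owned by the driver, and vma offset/size validation
 * is omitted for brevity.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(dev, vma, ring, ring_dma, ring_size);
 *	}
 */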

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		       dma_addr_t dma_addr, size_t size, unsigned long attrs);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				      attrs);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);
	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (!arch_dma_alloc_attrs(&dev))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
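
/*
 * Example (editor's sketch, not part of the original header): a long-lived,
 * coherent descriptor ring.  "ring_size" is a hypothetical driver constant.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, use 'ring' from the CPU ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */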

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
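
/*
 * Example (editor's sketch, not part of the original header): typical
 * probe-time mask negotiation, falling back from 64-bit to 32-bit
 * addressing before any mapping is attempted.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */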

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO);
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* !CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* !CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
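
/*
 * Example (editor's sketch, not part of the original header): the
 * DEFINE_DMA_UNMAP_* macros let a driver carry unmap bookkeeping that
 * compiles away entirely when CONFIG_NEED_DMA_MAP_STATE is not set.
 * "my_tx_desc" and "desc" are hypothetical.
 *
 *	struct my_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */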

#endif /* _LINUX_DMA_MAPPING_H */