/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_DMA_MAPPING_H
#define __ASM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/vmalloc.h>

#include <asm-generic/dma-coherent.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops coherent_swiotlb_dma_ops;
extern struct dma_map_ops noncoherent_swiotlb_dma_ops;

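/*
 * Use the per-device dma_map_ops if one has been set, otherwise fall back
 * to the global dma_ops (one of the swiotlb-backed sets declared above).
 */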
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
}

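/*
 * In the Xen initial domain, bus addresses are machine addresses rather
 * than (pseudo-)physical ones, so all DMA must be routed through
 * xen_dma_ops.
 */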
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

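/*
 * DMA (bus) addresses are identity-mapped to CPU physical addresses;
 * there is no per-device DMA offset here.
 */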
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	return (dma_addr_t)paddr;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	return (phys_addr_t)dev_addr;
}

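/*
 * Check whether a DMA mapping attempt failed; drivers must call this
 * before using a handle returned by one of the map operations.
 */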
static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	debug_dma_mapping_error(dev, dev_addr);
	return ops->mapping_error(dev, dev_addr);
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->dma_supported(dev, mask);
}

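/*
 * Set the device's streaming DMA mask, provided the underlying ops report
 * the mask as supported.
 */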
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;

	return 0;
}

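/* True if the device can address every byte of [addr, addr + size - 1]. */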
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size - 1 <= *dev->dma_mask;
}

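/* Hook used by the swiotlb code after device-to-memory transfers; nothing to do here. */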
static inline void dma_mark_clean(void *addr, size_t size)
{
}

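/* The coherent alloc/free API is implemented via the *_attrs variants below. */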
#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
#define dma_free_coherent(d, s, h, f)	dma_free_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flags,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *vaddr;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
		return vaddr;

	vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
	return vaddr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *vaddr, dma_addr_t dev_addr,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dev_addr);
	ops->free(dev, size, vaddr, dev_addr, attrs);
}
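
/*
 * Typical driver usage of the coherent API (an illustrative sketch only;
 * "dev" and the buffer size are hypothetical):
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
 */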

/*
 * There is no dma_cache_sync() implementation, so just return NULL here.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *handle, gfp_t flags)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t handle)
{
}

#endif	/* __KERNEL__ */
#endif	/* __ASM_DMA_MAPPING_H */