1#ifndef ASMARM_DMA_MAPPING_H
2#define ASMARM_DMA_MAPPING_H
3
4#ifdef __KERNEL__
5
6#include <linux/mm_types.h>
7#include <linux/scatterlist.h>
8#include <linux/dma-attrs.h>
9#include <linux/dma-debug.h>
10
11#include <asm/memory.h>
12
13#include <xen/xen.h>
14#include <asm/xen/hypervisor.h>
15
16#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
17extern struct dma_map_ops arm_dma_ops;
18extern struct dma_map_ops arm_coherent_dma_ops;
19
20static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
21{
22 if (dev && dev->archdata.dma_ops)
23 return dev->archdata.dma_ops;
24 return &arm_dma_ops;
25}
26
27static inline struct dma_map_ops *get_dma_ops(struct device *dev)
28{
29 if (xen_initial_domain())
30 return xen_dma_ops;
31 else
32 return __generic_dma_ops(dev);
33}
34
35static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
36{
37 BUG_ON(!dev);
38 dev->archdata.dma_ops = ops;
39}
40
#define HAVE_ARCH_DMA_SUPPORTED 1
/* Arch implementation of dma_supported(): non-zero if @dev can DMA with @mask. */
extern int dma_supported(struct device *dev, u64 mask);
43
44#ifdef __arch_page_to_dma
45#error Please update to __arch_pfn_to_dma
46#endif
47
48
49
50
51
52
53#ifndef __arch_pfn_to_dma
54static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
55{
56 if (dev)
57 pfn -= dev->dma_pfn_offset;
58 return (dma_addr_t)__pfn_to_bus(pfn);
59}
60
61static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
62{
63 unsigned long pfn = __bus_to_pfn(addr);
64
65 if (dev)
66 pfn += dev->dma_pfn_offset;
67
68 return pfn;
69}
70
71static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
72{
73 if (dev) {
74 unsigned long pfn = dma_to_pfn(dev, addr);
75
76 return phys_to_virt(__pfn_to_phys(pfn));
77 }
78
79 return (void *)__bus_to_virt((unsigned long)addr);
80}
81
82static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
83{
84 if (dev)
85 return pfn_to_dma(dev, virt_to_pfn(addr));
86
87 return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
88}
89
90#else
/*
 * The platform provides its own pfn/dma translation macros; these
 * wrappers simply forward to the __arch_* implementations so callers
 * see the same typed inline interface in both configurations.
 */
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

/* Inverse of pfn_to_dma(): device DMA address back to a CPU pfn. */
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

/* Device DMA address to a kernel virtual address. */
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

/* Kernel virtual address to a device DMA address. */
static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
110#endif
111
112
/*
 * Highest CPU page frame number this device can reach through its DMA
 * mask: the mask translated back to a CPU pfn, offset by the start of
 * physical memory.
 * NOTE(review): *dev->dma_mask is dereferenced unconditionally — only
 * pass devices with a valid dma_mask pointer.
 */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
/* Self-#define tells generic code the arch overrides dma_max_pfn(). */
#define dma_max_pfn(dev) dma_max_pfn(dev)
118
/*
 * Arch hooks invoked by bus/firmware code to configure DMA for a device
 * (DMA window base/size, optional IOMMU, cache coherency) and to tear
 * that configuration down again — see the definitions for details.
 */
#define arch_setup_dma_ops arch_setup_dma_ops
extern void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			       struct iommu_ops *iommu, bool coherent);

#define arch_teardown_dma_ops arch_teardown_dma_ops
extern void arch_teardown_dma_ops(struct device *dev);
125
126
127static inline bool is_device_dma_coherent(struct device *dev)
128{
129 return dev->archdata.dma_coherent;
130}
131
132static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
133{
134 unsigned int offset = paddr & ~PAGE_MASK;
135 return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
136}
137
138static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
139{
140 unsigned int offset = dev_addr & ~PAGE_MASK;
141 return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
142}
143
144static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
145{
146 u64 limit, mask;
147
148 if (!dev->dma_mask)
149 return 0;
150
151 mask = *dev->dma_mask;
152
153 limit = (mask + 1) & ~mask;
154 if (limit && size > limit)
155 return 0;
156
157 if ((addr | (addr + size - 1)) & ~mask)
158 return 0;
159
160 return 1;
161}
162
/* Intentionally a no-op on ARM; kept for core code that calls it. */
static inline void dma_mark_clean(void *addr, size_t size) { }
164
/* ARM backend for dma_set_mask(); see the definition for validation rules. */
extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
166
167
168
169
170
171
172
173
174
175
176
177
/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @gfp: allocation flags
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  Returns the
 * CPU-viewed address of the buffer and stores the device-viewed address
 * in @handle.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested
 * @cpu_addr: CPU-view address returned from the allocation
 * @handle: device-view address returned from the allocation
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by arm_dma_alloc().
 * References to the memory and mappings associated with @cpu_addr/@handle
 * during and after this call are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);
197
198
199
200
201
202
203
204
205
206
207
208
209
210
/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing the requested user mapping
 * @cpu_addr: kernel CPU-view address returned from the allocation
 * @dma_addr: device-view address returned from the allocation
 * @size: size of memory originally requested
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer into user space.  The buffer must not be
 * freed by the driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);
214
215
216
217
218
219
/*
 * Early-boot hook to change the size of the atomic coherent DMA pool
 * from its built-in default — must be called before the pool is set up
 * (presumably before postcore_initcall; verify against the definition).
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * dmabounce_register_dev
 * @dev: valid struct device pointer
 * @small: size of buffers to use with the small buffer pool
 * @large: size of buffers to use with the large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether a buffer needs bouncing
 *
 * Called by low-level platform code to register a device that needs
 * DMA bounce buffers.  Returns 0 on success.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
248
249
250
251
252
253
254
255
256
257
258
/**
 * dmabounce_unregister_dev
 * @dev: valid struct device pointer
 *
 * Called by low-level platform code when the device no longer uses
 * dmabounce; releases the dmabounce-related device resources.
 */
extern void dmabounce_unregister_dev(struct device *);
260
261
262
263
264
265
/*
 * The scatter/gather versions of the DMA mapping operations above,
 * operating on a scatterlist of @nents entries.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
/* Build an sg_table describing a coherent allocation (for dma_get_sgtable). */
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
277
278#endif
279#endif
280