// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
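
/*
 * Example (sketch, not part of this file): a driver probe routine can use
 * dmam_alloc_coherent() (a wrapper around dmam_alloc_attrs() above) so the
 * buffer is released automatically on driver detach.  The "foo" names and
 * RING_BYTES are hypothetical.
 *
 *	foo->ring = dmam_alloc_coherent(dev, RING_BYTES, &foo->ring_dma,
 *					GFP_KERNEL);
 *	if (!foo->ring)
 *		return -ENOMEM;
 *	// no explicit free needed; devres calls dmam_release() on detach
 */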

/*
 * Create scatter-list for the already allocated DMA buffer.
 */
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page = virt_to_page(cpu_addr);
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

/*
 * dma_get_sgtable_attrs - create an sg_table describing a coherent DMA
 * allocation, so that the buffer can be passed to interfaces that only
 * accept scatterlists.  Returns -ENXIO if the DMA ops in use do not
 * implement ->get_sgtable.
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);
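
/*
 * Example (sketch, not part of this file): exporting a coherent allocation as
 * a scatterlist via the dma_get_sgtable() wrapper, e.g. so it can be handed
 * to code that only consumes scatterlists.  Allocation of cpu_addr/dma_addr
 * and its error handling are omitted.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = dma_get_sgtable(dev, &sgt, cpu_addr, dma_addr, size);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */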

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev) ||
	    (IS_ENABLED(CONFIG_DMA_NONCOHERENT_CACHE_SYNC) &&
	     (attrs & DMA_ATTR_NON_CONSISTENT)))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
#ifdef CONFIG_MMU
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= count || user_count > count - off)
		return -ENXIO;

	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
			user_count << PAGE_SHIFT, vma->vm_page_prot);
#else
	return -ENXIO;
#endif
}

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
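
/*
 * Example (sketch, not part of this file): a driver's ->mmap() file operation
 * forwarding the request to dma_mmap_coherent() (dma_mmap_attrs() with
 * attrs == 0) for a previously allocated coherent buffer.  The "foo" names
 * are hypothetical.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->buf,
 *					 foo->buf_dma, foo->buf_size);
 *	}
 */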

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * Without a better answer from the direct mapping code or the DMA
	 * ops, fall back to 32 bits of addressing: every DMA ops
	 * implementation is required to handle at least that much, so it is
	 * the safest value to report back to drivers.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
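
/*
 * Example (sketch, not part of this file): a driver whose 64-bit descriptors
 * are more expensive may consult dma_get_required_mask() to see whether the
 * platform actually needs addressing beyond 32 bits before enabling them.
 * "use_64bit_desc" is a hypothetical flag.
 *
 *	if (dma_get_required_mask(dev) > DMA_BIT_MASK(32) &&
 *	    !dma_set_mask(dev, DMA_BIT_MASK(64)))
 *		use_64bit_desc = true;
 *	else if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */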

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_is_direct(ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Because
	 * vunmap() can sleep, getting this far with IRQs disabled indicates
	 * a caller freeing DMA memory from an invalid context.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_is_direct(ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
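
/*
 * Example (sketch, not part of this file): the usual driver pairing of
 * dma_alloc_coherent()/dma_free_coherent(), which wrap the *_attrs functions
 * above with attrs == 0.  "ring" and "ring_dma" are hypothetical names.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */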

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_is_direct(ops))
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
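
/*
 * Example (sketch, not part of this file): setting both the streaming and
 * coherent masks from a driver's probe routine, falling back from 64 to
 * 32 bits.  Most drivers use the dma_set_mask_and_coherent() helper, which
 * calls dma_set_mask() and dma_set_coherent_mask() above.
 *
 *	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;	// no usable DMA addressing
 */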

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));

	if (dma_is_direct(ops))
		arch_dma_cache_sync(dev, vaddr, size, dir);
	else if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}
EXPORT_SYMBOL(dma_cache_sync);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_is_direct(ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);
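
/*
 * Example (sketch, not part of this file): a storage driver may cap the
 * requests it builds so that no single mapping exceeds what the DMA layer
 * (e.g. a swiotlb bounce buffer) can handle; "q" is the driver's hypothetical
 * request queue.
 *
 *	unsigned int max_sectors =
 *		min_t(size_t, dma_max_mapping_size(dev) >> SECTOR_SHIFT,
 *		      UINT_MAX);
 *
 *	blk_queue_max_hw_sectors(q, max_sectors);
 */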

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);