// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 */
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
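
/*
 * Example usage sketch (not part of this file; the "foo" names are
 * hypothetical): a driver allocating a coherent buffer with the managed API
 * needs no explicit free on its error or remove paths, because the devres
 * action registered above releases the buffer on driver detach.
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		dma_addr_t dma;
 *		void *buf;
 *
 *		buf = dmam_alloc_attrs(dev, PAGE_SIZE, &dma, GFP_KERNEL, 0);
 *		if (!buf)
 *			return -ENOMEM;
 *		return foo_hw_init(dev, buf, dma);
 *	}
 */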

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (likely(!ops))
		return true;
#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);
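
/*
 * Example usage sketch (illustrative only): mapping a kmalloc()ed buffer for
 * a single device-to-memory transfer.  dma_map_single() is the inline wrapper
 * that ends up here via dma_map_page_attrs(); every mapping must be checked
 * with dma_mapping_error() before the address is handed to hardware.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	(program the device with "dma", wait for the transfer to complete)
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */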

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
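
/*
 * Example usage sketch (illustrative only, setup_desc() is hypothetical):
 * the count returned by dma_map_sg() may be smaller than the nents passed in
 * if an IOMMU merged segments, so the device must be programmed with the
 * returned count, while dma_unmap_sg() still takes the original nents.
 *
 *	int i, count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		setup_desc(sg_dma_address(sg), sg_dma_len(sg));
 *	(run the transfer)
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */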

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_map_direct(dev, ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
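
/*
 * Example usage sketch (illustrative only): a streaming mapping that stays
 * mapped across transfers has to be handed back and forth explicitly, since
 * ownership of the buffer alternates between CPU and device.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	(the CPU may now read the data the device wrote)
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	(the device may now DMA into the buffer again)
 */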

/*
 * dma_get_sgtable_attrs - create a scatterlist describing a coherent
 * allocation previously returned by dma_alloc_attrs(), so that the buffer
 * can be handed to code that consumes sg_tables.  Returns -ENXIO when the
 * DMA ops in use do not implement ->get_sgtable.
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (force_dma_unencrypted(dev))
		prot = pgprot_decrypted(prot);
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: device to map the allocation for
 * @vma: vm_area_struct describing the requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of the memory originally requested in dma_alloc_attrs
 * @attrs: attributes of the mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
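
/*
 * Example usage sketch (illustrative only, the "foo" structure is
 * hypothetical): exporting a coherent buffer to user space from a driver's
 * file_operations ->mmap handler via the dma_mmap_coherent() wrapper.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		if (!dma_can_mmap(fd->dev))
 *			return -ENXIO;
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_addr, fd->size);
 *	}
 */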

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);
	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a
	 * 32-bit DMA mask (and use bounce buffering if that isn't supported
	 * in hardware).  As the direct mapping code has its own routine to
	 * report the optimal mask, defaulting to 32 bits here is the safe
	 * answer for the remaining IOMMU-based implementations.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops))
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	else if (ops->alloc)
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	else
		return NULL;

	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);
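
/*
 * Example usage sketch (illustrative only): coherent memory is meant for
 * long-lived structures that CPU and device access concurrently, such as a
 * descriptor ring, rather than for one-off data transfers.
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(use the ring; the device sees it at ring_dma)
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */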

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	struct page *page;

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	else if (ops->alloc_pages)
		page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
	else
		return NULL;

	debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);

	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);

	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *vaddr;

	if (!ops || !ops->alloc_noncoherent) {
		struct page *page;

		page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
		if (!page)
			return NULL;
		return page_address(page);
	}

	size = PAGE_ALIGN(size);
	vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
	if (vaddr)
		debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
				   *dma_handle);
	return vaddr;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->free_noncoherent) {
		dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
		return;
	}

	size = PAGE_ALIGN(size);
	debug_dma_unmap_page(dev, dma_handle, size, dir);
	ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncoherent);

int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * ->dma_supported sets the bypass flag, so we must always call
	 * into the method here unless the device is truly direct mapped.
	 */
	if (!ops)
		return dma_direct_supported(dev, mask);
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)	do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
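
/*
 * Example usage sketch (illustrative only): drivers normally set both the
 * streaming and the coherent mask in one go from their probe routine, and
 * fall back to 32 bits if the platform cannot address the full range.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */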

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		return dma_direct_need_sync(dev, dma_addr);
	return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);