/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
12#include <linux/module.h>
13#include <linux/mm.h>
14#include <linux/gfp.h>
15#include <linux/errno.h>
16#include <linux/list.h>
17#include <linux/init.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
20#include <linux/dma-contiguous.h>
21#include <linux/highmem.h>
22#include <linux/memblock.h>
23#include <linux/slab.h>
24#include <linux/iommu.h>
25#include <linux/io.h>
26#include <linux/vmalloc.h>
27#include <linux/sizes.h>
28
29#include <asm/memory.h>
30#include <asm/highmem.h>
31#include <asm/cacheflush.h>
32#include <asm/tlbflush.h>
33#include <asm/mach/arch.h>
34#include <asm/dma-iommu.h>
35#include <asm/mach/map.h>
36#include <asm/system_info.h>
37#include <asm/dma-contiguous.h>
38
39#include "mm.h"
40
/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  The helpers below
 * perform the cache maintenance needed to hand a buffer from the CPU to
 * the device (__dma_page_cpu_to_dev) and back again
 * (__dma_page_dev_to_cpu) for streaming DMA mappings.
 */
53static void __dma_page_cpu_to_dev(struct page *, unsigned long,
54 size_t, enum dma_data_direction);
55static void __dma_page_dev_to_cpu(struct page *, unsigned long,
56 size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
72static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
73 unsigned long offset, size_t size, enum dma_data_direction dir,
74 struct dma_attrs *attrs)
75{
76 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
77 __dma_page_cpu_to_dev(page, offset, size, dir);
78 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
79}
80
81static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
82 unsigned long offset, size_t size, enum dma_data_direction dir,
83 struct dma_attrs *attrs)
84{
85 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
86}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
102static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
103 size_t size, enum dma_data_direction dir,
104 struct dma_attrs *attrs)
105{
106 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
107 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
108 handle & ~PAGE_MASK, size, dir);
109}
110
111static void arm_dma_sync_single_for_cpu(struct device *dev,
112 dma_addr_t handle, size_t size, enum dma_data_direction dir)
113{
114 unsigned int offset = handle & (PAGE_SIZE - 1);
115 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
116 __dma_page_dev_to_cpu(page, offset, size, dir);
117}
118
119static void arm_dma_sync_single_for_device(struct device *dev,
120 dma_addr_t handle, size_t size, enum dma_data_direction dir)
121{
122 unsigned int offset = handle & (PAGE_SIZE - 1);
123 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
124 __dma_page_cpu_to_dev(page, offset, size, dir);
125}
126
127struct dma_map_ops arm_dma_ops = {
128 .alloc = arm_dma_alloc,
129 .free = arm_dma_free,
130 .mmap = arm_dma_mmap,
131 .get_sgtable = arm_dma_get_sgtable,
132 .map_page = arm_dma_map_page,
133 .unmap_page = arm_dma_unmap_page,
134 .map_sg = arm_dma_map_sg,
135 .unmap_sg = arm_dma_unmap_sg,
136 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
137 .sync_single_for_device = arm_dma_sync_single_for_device,
138 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
139 .sync_sg_for_device = arm_dma_sync_sg_for_device,
140 .set_dma_mask = arm_dma_set_mask,
141};
142EXPORT_SYMBOL(arm_dma_ops);
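
/*
 * Illustrative only: drivers never call the methods above directly.  A
 * streaming mapping made through the generic DMA API, e.g.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 *
 * is dispatched through the map_page/unmap_page methods of arm_dma_ops
 * for any device still using these (non-IOMMU) operations.
 */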
143
144static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
145 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
146static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
147 dma_addr_t handle, struct dma_attrs *attrs);
148
149struct dma_map_ops arm_coherent_dma_ops = {
150 .alloc = arm_coherent_dma_alloc,
151 .free = arm_coherent_dma_free,
152 .mmap = arm_dma_mmap,
153 .get_sgtable = arm_dma_get_sgtable,
154 .map_page = arm_coherent_dma_map_page,
155 .map_sg = arm_dma_map_sg,
156 .set_dma_mask = arm_dma_set_mask,
157};
158EXPORT_SYMBOL(arm_coherent_dma_ops);
159
160static u64 get_coherent_dma_mask(struct device *dev)
161{
162 u64 mask = (u64)arm_dma_limit;
163
164 if (dev) {
165 mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
171 if (mask == 0) {
172 dev_warn(dev, "coherent DMA mask is unset\n");
173 return 0;
174 }
175
176 if ((~mask) & (u64)arm_dma_limit) {
177 dev_warn(dev, "coherent DMA mask %#llx is smaller "
178 "than system GFP_DMA mask %#llx\n",
179 mask, (u64)arm_dma_limit);
180 return 0;
181 }
182 }
183
184 return mask;
185}
186
187static void __dma_clear_buffer(struct page *page, size_t size)
188{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
193 if (PageHighMem(page)) {
194 phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
195 phys_addr_t end = base + size;
196 while (size > 0) {
197 void *ptr = kmap_atomic(page);
198 memset(ptr, 0, PAGE_SIZE);
199 dmac_flush_range(ptr, ptr + PAGE_SIZE);
200 kunmap_atomic(ptr);
201 page++;
202 size -= PAGE_SIZE;
203 }
204 outer_flush_range(base, end);
205 } else {
206 void *ptr = page_address(page);
207 memset(ptr, 0, size);
208 dmac_flush_range(ptr, ptr + size);
209 outer_flush_range(__pa(ptr), __pa(ptr) + size);
210 }
211}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
217static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
218{
219 unsigned long order = get_order(size);
220 struct page *page, *p, *e;
221
222 page = alloc_pages(gfp, order);
223 if (!page)
224 return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
229 split_page(page, order);
230 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
231 __free_page(p);
232
233 __dma_clear_buffer(page, size);
234
235 return page;
236}
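
/*
 * Worked example (illustrative): with 4K pages a 12K request has
 * get_order() == 2, so alloc_pages() hands back 16K; split_page() turns
 * that into four order-0 pages and the loop above frees the fourth one,
 * leaving exactly the 12K that was asked for.
 */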

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
241static void __dma_free_buffer(struct page *page, size_t size)
242{
243 struct page *e = page + (size >> PAGE_SHIFT);
244
245 while (page < e) {
246 __free_page(page);
247 page++;
248 }
249}
250
251#ifdef CONFIG_MMU
252#ifdef CONFIG_HUGETLB_PAGE
253#error ARM Coherent DMA allocator does not (yet) support huge TLB
254#endif
255
256static void *__alloc_from_contiguous(struct device *dev, size_t size,
257 pgprot_t prot, struct page **ret_page,
258 const void *caller);
259
260static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
261 pgprot_t prot, struct page **ret_page,
262 const void *caller);
263
264static void *
265__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
266 const void *caller)
267{
268 struct vm_struct *area;
269 unsigned long addr;

	/*
	 * DMA allocations can be mapped to user space, so set the
	 * VM flags accordingly (VM_USERMAP).
	 */
275 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
276 caller);
277 if (!area)
278 return NULL;
279 addr = (unsigned long)area->addr;
280 area->phys_addr = __pfn_to_phys(page_to_pfn(page));
281
282 if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
283 vunmap((void *)addr);
284 return NULL;
285 }
286 return (void *)addr;
287}
288
289static void __dma_free_remap(void *cpu_addr, size_t size)
290{
291 unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
292 struct vm_struct *area = find_vm_area(cpu_addr);
293 if (!area || (area->flags & flags) != flags) {
294 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
295 return;
296 }
297 unmap_kernel_range((unsigned long)cpu_addr, size);
298 vunmap(cpu_addr);
299}
300
301#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
302
303struct dma_pool {
304 size_t size;
305 spinlock_t lock;
306 unsigned long *bitmap;
307 unsigned long nr_pages;
308 void *vaddr;
309 struct page **pages;
310};
311
312static struct dma_pool atomic_pool = {
313 .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
314};
315
316static int __init early_coherent_pool(char *p)
317{
318 atomic_pool.size = memparse(p, &p);
319 return 0;
320}
321early_param("coherent_pool", early_coherent_pool);
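
/*
 * The atomic pool size can be overridden on the kernel command line,
 * e.g. "coherent_pool=4M"; the value is parsed with memparse(), so the
 * usual K/M/G suffixes are accepted.
 */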
322
323void __init init_dma_coherent_pool_size(unsigned long size)
324{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
328 BUG_ON(atomic_pool.vaddr);

	/*
	 * Set the architecture specific coherent pool size only if it
	 * has not already been changed by the kernel command line.
	 */
334 if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
335 atomic_pool.size = size;
336}

/*
 * Initialise the coherent pool for atomic allocations.
 */
341static int __init atomic_pool_init(void)
342{
343 struct dma_pool *pool = &atomic_pool;
344 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
345 gfp_t gfp = GFP_KERNEL | GFP_DMA;
346 unsigned long nr_pages = pool->size >> PAGE_SHIFT;
347 unsigned long *bitmap;
348 struct page *page;
349 struct page **pages;
350 void *ptr;
351 int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
352
353 bitmap = kzalloc(bitmap_size, GFP_KERNEL);
354 if (!bitmap)
355 goto no_bitmap;
356
357 pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
358 if (!pages)
359 goto no_pages;
360
361 if (IS_ENABLED(CONFIG_DMA_CMA))
362 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
363 atomic_pool_init);
364 else
365 ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
366 atomic_pool_init);
367 if (ptr) {
368 int i;
369
370 for (i = 0; i < nr_pages; i++)
371 pages[i] = page + i;
372
373 spin_lock_init(&pool->lock);
374 pool->vaddr = ptr;
375 pool->pages = pages;
376 pool->bitmap = bitmap;
377 pool->nr_pages = nr_pages;
378 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
379 (unsigned)pool->size / 1024);
380 return 0;
381 }
382
383 kfree(pages);
384no_pages:
385 kfree(bitmap);
386no_bitmap:
387 pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
388 (unsigned)pool->size / 1024);
389 return -ENOMEM;
390}

/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
394postcore_initcall(atomic_pool_init);
395
396struct dma_contig_early_reserve {
397 phys_addr_t base;
398 unsigned long size;
399};
400
401static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
402
403static int dma_mmu_remap_num __initdata;
404
405void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
406{
407 dma_mmu_remap[dma_mmu_remap_num].base = base;
408 dma_mmu_remap[dma_mmu_remap_num].size = size;
409 dma_mmu_remap_num++;
410}
411
412void __init dma_contiguous_remap(void)
413{
414 int i;
415 for (i = 0; i < dma_mmu_remap_num; i++) {
416 phys_addr_t start = dma_mmu_remap[i].base;
417 phys_addr_t end = start + dma_mmu_remap[i].size;
418 struct map_desc map;
419 unsigned long addr;
420
421 if (end > arm_lowmem_limit)
422 end = arm_lowmem_limit;
423 if (start >= end)
424 continue;
425
426 map.pfn = __phys_to_pfn(start);
427 map.virtual = __phys_to_virt(start);
428 map.length = end - start;
429 map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear the previous low-memory mapping for this range
		 * before installing the new MT_MEMORY_DMA_READY mapping.
		 */
434 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
435 addr += PMD_SIZE)
436 pmd_clear(pmd_off_k(addr));
437
438 iotable_init(&map, 1);
439 }
440}
441
442static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
443 void *data)
444{
445 struct page *page = virt_to_page(addr);
446 pgprot_t prot = *(pgprot_t *)data;
447
448 set_pte_ext(pte, mk_pte(page, prot), 0);
449 return 0;
450}
451
452static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
453{
454 unsigned long start = (unsigned long) page_address(page);
455 unsigned end = start + size;
456
457 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
458 dsb();
459 flush_tlb_kernel_range(start, end);
460}
461
462static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
463 pgprot_t prot, struct page **ret_page,
464 const void *caller)
465{
466 struct page *page;
467 void *ptr;
468 page = __dma_alloc_buffer(dev, size, gfp);
469 if (!page)
470 return NULL;
471
472 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
473 if (!ptr) {
474 __dma_free_buffer(page, size);
475 return NULL;
476 }
477
478 *ret_page = page;
479 return ptr;
480}
481
482static void *__alloc_from_pool(size_t size, struct page **ret_page)
483{
484 struct dma_pool *pool = &atomic_pool;
485 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
486 unsigned int pageno;
487 unsigned long flags;
488 void *ptr = NULL;
489 unsigned long align_mask;
490
491 if (!pool->vaddr) {
492 WARN(1, "coherent pool not initialised!\n");
493 return NULL;
494 }

	/*
	 * Align the region allocation - allocations from the pool are
	 * rather small, so align them to their order in pages, minimum
	 * is a page size.  This helps reduce fragmentation of the DMA
	 * space.
	 */
501 align_mask = (1 << get_order(size)) - 1;
502
503 spin_lock_irqsave(&pool->lock, flags);
504 pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
505 0, count, align_mask);
506 if (pageno < pool->nr_pages) {
507 bitmap_set(pool->bitmap, pageno, count);
508 ptr = pool->vaddr + PAGE_SIZE * pageno;
509 *ret_page = pool->pages[pageno];
510 } else {
511 pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
512 "Please increase it with coherent_pool= kernel parameter!\n",
513 (unsigned)pool->size / 1024);
514 }
515 spin_unlock_irqrestore(&pool->lock, flags);
516
517 return ptr;
518}
519
520static bool __in_atomic_pool(void *start, size_t size)
521{
522 struct dma_pool *pool = &atomic_pool;
523 void *end = start + size;
524 void *pool_start = pool->vaddr;
525 void *pool_end = pool->vaddr + pool->size;
526
527 if (start < pool_start || start >= pool_end)
528 return false;
529
530 if (end <= pool_end)
531 return true;
532
533 WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
534 start, end - 1, pool_start, pool_end - 1);
535
536 return false;
537}
538
539static int __free_from_pool(void *start, size_t size)
540{
541 struct dma_pool *pool = &atomic_pool;
542 unsigned long pageno, count;
543 unsigned long flags;
544
545 if (!__in_atomic_pool(start, size))
546 return 0;
547
548 pageno = (start - pool->vaddr) >> PAGE_SHIFT;
549 count = size >> PAGE_SHIFT;
550
551 spin_lock_irqsave(&pool->lock, flags);
552 bitmap_clear(pool->bitmap, pageno, count);
553 spin_unlock_irqrestore(&pool->lock, flags);
554
555 return 1;
556}
557
558static void *__alloc_from_contiguous(struct device *dev, size_t size,
559 pgprot_t prot, struct page **ret_page,
560 const void *caller)
561{
562 unsigned long order = get_order(size);
563 size_t count = size >> PAGE_SHIFT;
564 struct page *page;
565 void *ptr;
566
567 page = dma_alloc_from_contiguous(dev, count, order);
568 if (!page)
569 return NULL;
570
571 __dma_clear_buffer(page, size);
572
573 if (PageHighMem(page)) {
574 ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
575 if (!ptr) {
576 dma_release_from_contiguous(dev, page, count);
577 return NULL;
578 }
579 } else {
580 __dma_remap(page, size, prot);
581 ptr = page_address(page);
582 }
583 *ret_page = page;
584 return ptr;
585}
586
587static void __free_from_contiguous(struct device *dev, struct page *page,
588 void *cpu_addr, size_t size)
589{
590 if (PageHighMem(page))
591 __dma_free_remap(cpu_addr, size);
592 else
593 __dma_remap(page, size, pgprot_kernel);
594 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
595}
596
597static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
598{
599 prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
600 pgprot_writecombine(prot) :
601 pgprot_dmacoherent(prot);
602 return prot;
603}
604
605#define nommu() 0
606
607#else
608
609#define nommu() 1
610
611#define __get_dma_pgprot(attrs, prot) __pgprot(0)
612#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
613#define __alloc_from_pool(size, ret_page) NULL
614#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
615#define __free_from_pool(cpu_addr, size) 0
616#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
617#define __dma_free_remap(cpu_addr, size) do { } while (0)
618
619#endif
620
621static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
622 struct page **ret_page)
623{
624 struct page *page;
625 page = __dma_alloc_buffer(dev, size, gfp);
626 if (!page)
627 return NULL;
628
629 *ret_page = page;
630 return page_address(page);
631}

/*
 * Core allocator: pick the simple, atomic-pool, remap or CMA path
 * depending on the calling context and kernel configuration.
 */
635static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
636 gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
637{
638 u64 mask = get_coherent_dma_mask(dev);
639 struct page *page = NULL;
640 void *addr;
641
642#ifdef CONFIG_DMA_API_DEBUG
643 u64 limit = (mask + 1) & ~mask;
644 if (limit && size >= limit) {
645 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
646 size, mask);
647 return NULL;
648 }
649#endif
650
651 if (!mask)
652 return NULL;
653
654 if (mask < 0xffffffffULL)
655 gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
664 gfp &= ~(__GFP_COMP);
665
666 *handle = DMA_ERROR_CODE;
667 size = PAGE_ALIGN(size);
668
669 if (is_coherent || nommu())
670 addr = __alloc_simple_buffer(dev, size, gfp, &page);
671 else if (!(gfp & __GFP_WAIT))
672 addr = __alloc_from_pool(size, &page);
673 else if (!IS_ENABLED(CONFIG_DMA_CMA))
674 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
675 else
676 addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
677
678 if (addr)
679 *handle = pfn_to_dma(dev, page_to_pfn(page));
680
681 return addr;
682}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
688void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
689 gfp_t gfp, struct dma_attrs *attrs)
690{
691 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
692 void *memory;
693
694 if (dma_alloc_from_coherent(dev, size, handle, &memory))
695 return memory;
696
697 return __dma_alloc(dev, size, handle, gfp, prot, false,
698 __builtin_return_address(0));
699}
700
701static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
702 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
703{
704 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
705 void *memory;
706
707 if (dma_alloc_from_coherent(dev, size, handle, &memory))
708 return memory;
709
710 return __dma_alloc(dev, size, handle, gfp, prot, true,
711 __builtin_return_address(0));
712}

/*
 * Create a userspace mapping for the DMA-coherent memory.
 */
717int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
718 void *cpu_addr, dma_addr_t dma_addr, size_t size,
719 struct dma_attrs *attrs)
720{
721 int ret = -ENXIO;
722#ifdef CONFIG_MMU
723 unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
724 unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
725 unsigned long pfn = dma_to_pfn(dev, dma_addr);
726 unsigned long off = vma->vm_pgoff;
727
728 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
729
730 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
731 return ret;
732
733 if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
734 ret = remap_pfn_range(vma, vma->vm_start,
735 pfn + off,
736 vma->vm_end - vma->vm_start,
737 vma->vm_page_prot);
738 }
739#endif
740
741 return ret;
742}
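
/*
 * Illustrative only: a driver normally reaches arm_dma_mmap() via the DMA
 * API helper from its own mmap file operation, along the lines of
 *
 *	return dma_mmap_coherent(dev, vma, cpu_addr, dma_handle, size);
 *
 * where cpu_addr/dma_handle come from an earlier dma_alloc_coherent();
 * 'dev', 'cpu_addr' and 'dma_handle' here are placeholders, not symbols
 * defined in this file.
 */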

/*
 * Free a buffer as defined by the above mapping.
 */
747static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
748 dma_addr_t handle, struct dma_attrs *attrs,
749 bool is_coherent)
750{
751 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
752
753 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
754 return;
755
756 size = PAGE_ALIGN(size);
757
758 if (is_coherent || nommu()) {
759 __dma_free_buffer(page, size);
760 } else if (__free_from_pool(cpu_addr, size)) {
761 return;
762 } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
763 __dma_free_remap(cpu_addr, size);
764 __dma_free_buffer(page, size);
765 } else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
769 WARN_ON(irqs_disabled());
770 __free_from_contiguous(dev, page, cpu_addr, size);
771 }
772}
773
774void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
775 dma_addr_t handle, struct dma_attrs *attrs)
776{
777 __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
778}
779
780static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
781 dma_addr_t handle, struct dma_attrs *attrs)
782{
783 __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
784}
785
786int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
787 void *cpu_addr, dma_addr_t handle, size_t size,
788 struct dma_attrs *attrs)
789{
790 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
791 int ret;
792
793 ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
794 if (unlikely(ret))
795 return ret;
796
797 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
798 return 0;
799}
800
801static void dma_cache_maint_page(struct page *page, unsigned long offset,
802 size_t size, enum dma_data_direction dir,
803 void (*op)(const void *, size_t, int))
804{
805 unsigned long pfn;
806 size_t left = size;
807
808 pfn = page_to_pfn(page) + offset / PAGE_SIZE;
809 offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * skipped.
	 */
817 do {
818 size_t len = left;
819 void *vaddr;
820
821 page = pfn_to_page(pfn);
822
823 if (PageHighMem(page)) {
824 if (len + offset > PAGE_SIZE)
825 len = PAGE_SIZE - offset;
826
827 if (cache_is_vipt_nonaliasing()) {
828 vaddr = kmap_atomic(page);
829 op(vaddr + offset, len, dir);
830 kunmap_atomic(vaddr);
831 } else {
832 vaddr = kmap_high_get(page);
833 if (vaddr) {
834 op(vaddr + offset, len, dir);
835 kunmap_high(page);
836 }
837 }
838 } else {
839 vaddr = page_address(page) + offset;
840 op(vaddr, len, dir);
841 }
842 offset = 0;
843 pfn++;
844 left -= len;
845 } while (left);
846}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
854static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
855 size_t size, enum dma_data_direction dir)
856{
857 unsigned long paddr;
858
859 dma_cache_maint_page(page, off, size, dir, dmac_map_area);
860
861 paddr = page_to_phys(page) + off;
862 if (dir == DMA_FROM_DEVICE) {
863 outer_inv_range(paddr, paddr + size);
864 } else {
865 outer_clean_range(paddr, paddr + size);
866 }
867
868}
869
870static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
871 size_t size, enum dma_data_direction dir)
872{
873 unsigned long paddr = page_to_phys(page) + off;

	/* don't bother invalidating if DMA to device */
877 if (dir != DMA_TO_DEVICE)
878 outer_inv_range(paddr, paddr + size);
879
880 dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
885 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
886 set_bit(PG_dcache_clean, &page->flags);
887}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
905int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
906 enum dma_data_direction dir, struct dma_attrs *attrs)
907{
908 struct dma_map_ops *ops = get_dma_ops(dev);
909 struct scatterlist *s;
910 int i, j;
911
912 for_each_sg(sg, s, nents, i) {
913#ifdef CONFIG_NEED_SG_DMA_LENGTH
914 s->dma_length = s->length;
915#endif
916 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
917 s->length, dir, attrs);
918 if (dma_mapping_error(dev, s->dma_address))
919 goto bad_mapping;
920 }
921 return nents;
922
923 bad_mapping:
924 for_each_sg(sg, s, i, j)
925 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
926 return 0;
927}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  CPU access rules
 * concerning calls here are the same as for dma_unmap_single().
 */
939void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
940 enum dma_data_direction dir, struct dma_attrs *attrs)
941{
942 struct dma_map_ops *ops = get_dma_ops(dev);
943 struct scatterlist *s;
944
945 int i;
946
947 for_each_sg(sg, s, nents, i)
948 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
949}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
958void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
959 int nents, enum dma_data_direction dir)
960{
961 struct dma_map_ops *ops = get_dma_ops(dev);
962 struct scatterlist *s;
963 int i;
964
965 for_each_sg(sg, s, nents, i)
966 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
967 dir);
968}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
977void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
978 int nents, enum dma_data_direction dir)
979{
980 struct dma_map_ops *ops = get_dma_ops(dev);
981 struct scatterlist *s;
982 int i;
983
984 for_each_sg(sg, s, nents, i)
985 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
986 dir);
987}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
995int dma_supported(struct device *dev, u64 mask)
996{
997 if (mask < (u64)arm_dma_limit)
998 return 0;
999 return 1;
1000}
1001EXPORT_SYMBOL(dma_supported);
1002
1003int arm_dma_set_mask(struct device *dev, u64 dma_mask)
1004{
1005 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
1006 return -EIO;
1007
1008 *dev->dma_mask = dma_mask;
1009
1010 return 0;
1011}
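
/*
 * Illustrative only: a driver limited to 32-bit addressing would
 * typically call
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 *
 * which ends up here through the set_dma_mask method of its dma_map_ops.
 */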
1012
1013#define PREALLOC_DMA_DEBUG_ENTRIES 4096
1014
1015static int __init dma_debug_do_init(void)
1016{
1017 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
1018 return 0;
1019}
1020fs_initcall(dma_debug_do_init);
1021
1022#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

1026static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1027 size_t size)
1028{
1029 unsigned int order = get_order(size);
1030 unsigned int align = 0;
1031 unsigned int count, start;
1032 unsigned long flags;
1033
1034 if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
1035 order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
1036
1037 count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
1038 (1 << mapping->order) - 1) >> mapping->order;
1039
1040 if (order > mapping->order)
1041 align = (1 << (order - mapping->order)) - 1;
1042
1043 spin_lock_irqsave(&mapping->lock, flags);
1044 start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
1045 count, align);
1046 if (start > mapping->bits) {
1047 spin_unlock_irqrestore(&mapping->lock, flags);
1048 return DMA_ERROR_CODE;
1049 }
1050
1051 bitmap_set(mapping->bitmap, start, count);
1052 spin_unlock_irqrestore(&mapping->lock, flags);
1053
1054 return mapping->base + (start << (mapping->order + PAGE_SHIFT));
1055}
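
/*
 * Worked example (illustrative): with mapping->order == 0 each bitmap bit
 * covers one page, so a 64K request needs count == 16 bits and, since
 * get_order(64K) == 4, is aligned to a 16-bit boundary in the bitmap;
 * the returned bus address is mapping->base plus the first bit index
 * shifted left by PAGE_SHIFT.
 */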
1056
1057static inline void __free_iova(struct dma_iommu_mapping *mapping,
1058 dma_addr_t addr, size_t size)
1059{
1060 unsigned int start = (addr - mapping->base) >>
1061 (mapping->order + PAGE_SHIFT);
1062 unsigned int count = ((size >> PAGE_SHIFT) +
1063 (1 << mapping->order) - 1) >> mapping->order;
1064 unsigned long flags;
1065
1066 spin_lock_irqsave(&mapping->lock, flags);
1067 bitmap_clear(mapping->bitmap, start, count);
1068 spin_unlock_irqrestore(&mapping->lock, flags);
1069}
1070
1071static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
1072 gfp_t gfp, struct dma_attrs *attrs)
1073{
1074 struct page **pages;
1075 int count = size >> PAGE_SHIFT;
1076 int array_size = count * sizeof(struct page *);
1077 int i = 0;
1078
1079 if (array_size <= PAGE_SIZE)
1080 pages = kzalloc(array_size, gfp);
1081 else
1082 pages = vzalloc(array_size);
1083 if (!pages)
1084 return NULL;
1085
	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1088 unsigned long order = get_order(size);
1089 struct page *page;
1090
1091 page = dma_alloc_from_contiguous(dev, count, order);
1092 if (!page)
1093 goto error;
1094
1095 __dma_clear_buffer(page, size);
1096
1097 for (i = 0; i < count; i++)
1098 pages[i] = page + i;
1099
1100 return pages;
1101 }

	/*
	 * IOMMU can map any pages, so highmem can also be used here.
	 */
1106 gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
1107
1108 while (count) {
1109 int j, order = __fls(count);
1110
1111 pages[i] = alloc_pages(gfp, order);
1112 while (!pages[i] && order)
1113 pages[i] = alloc_pages(gfp, --order);
1114 if (!pages[i])
1115 goto error;
1116
1117 if (order) {
1118 split_page(pages[i], order);
1119 j = 1 << order;
1120 while (--j)
1121 pages[i + j] = pages[i] + j;
1122 }
1123
1124 __dma_clear_buffer(pages[i], PAGE_SIZE << order);
1125 i += 1 << order;
1126 count -= 1 << order;
1127 }
1128
1129 return pages;
1130error:
1131 while (i--)
1132 if (pages[i])
1133 __free_pages(pages[i], 0);
1134 if (array_size <= PAGE_SIZE)
1135 kfree(pages);
1136 else
1137 vfree(pages);
1138 return NULL;
1139}
1140
1141static int __iommu_free_buffer(struct device *dev, struct page **pages,
1142 size_t size, struct dma_attrs *attrs)
1143{
1144 int count = size >> PAGE_SHIFT;
1145 int array_size = count * sizeof(struct page *);
1146 int i;
1147
1148 if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
1149 dma_release_from_contiguous(dev, pages[0], count);
1150 } else {
1151 for (i = 0; i < count; i++)
1152 if (pages[i])
1153 __free_pages(pages[i], 0);
1154 }
1155
1156 if (array_size <= PAGE_SIZE)
1157 kfree(pages);
1158 else
1159 vfree(pages);
1160 return 0;
1161}

/*
 * Create a CPU mapping for a specified array of pages.
 */
1166static void *
1167__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1168 const void *caller)
1169{
1170 unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
1171 struct vm_struct *area;
1172 unsigned long p;
1173
1174 area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
1175 caller);
1176 if (!area)
1177 return NULL;
1178
1179 area->pages = pages;
1180 area->nr_pages = nr_pages;
1181 p = (unsigned long)area->addr;
1182
1183 for (i = 0; i < nr_pages; i++) {
1184 phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
1185 if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
1186 goto err;
1187 p += PAGE_SIZE;
1188 }
1189 return area->addr;
1190err:
1191 unmap_kernel_range((unsigned long)area->addr, size);
1192 vunmap(area->addr);
1193 return NULL;
1194}

/*
 * Create a mapping in device IO address space for the specified pages.
 */
1199static dma_addr_t
1200__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
1201{
1202 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1203 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1204 dma_addr_t dma_addr, iova;
1205 int i, ret = DMA_ERROR_CODE;
1206
1207 dma_addr = __alloc_iova(mapping, size);
1208 if (dma_addr == DMA_ERROR_CODE)
1209 return dma_addr;
1210
1211 iova = dma_addr;
1212 for (i = 0; i < count; ) {
1213 unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
1214 phys_addr_t phys = page_to_phys(pages[i]);
1215 unsigned int len, j;
1216
1217 for (j = i + 1; j < count; j++, next_pfn++)
1218 if (page_to_pfn(pages[j]) != next_pfn)
1219 break;
1220
1221 len = (j - i) << PAGE_SHIFT;
1222 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1223 if (ret < 0)
1224 goto fail;
1225 iova += len;
1226 i = j;
1227 }
1228 return dma_addr;
1229fail:
1230 iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
1231 __free_iova(mapping, dma_addr, size);
1232 return DMA_ERROR_CODE;
1233}
1234
1235static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1236{
1237 struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * Add the optional in-page offset from iova to size and align
	 * the result to page size.
	 */
1243 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1244 iova &= PAGE_MASK;
1245
1246 iommu_unmap(mapping->domain, iova, size);
1247 __free_iova(mapping, iova, size);
1248 return 0;
1249}
1250
1251static struct page **__atomic_get_pages(void *addr)
1252{
1253 struct dma_pool *pool = &atomic_pool;
1254 struct page **pages = pool->pages;
1255 int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
1256
1257 return pages + offs;
1258}
1259
1260static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
1261{
1262 struct vm_struct *area;
1263
1264 if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
1265 return __atomic_get_pages(cpu_addr);
1266
1267 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1268 return cpu_addr;
1269
1270 area = find_vm_area(cpu_addr);
1271 if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
1272 return area->pages;
1273 return NULL;
1274}
1275
1276static void *__iommu_alloc_atomic(struct device *dev, size_t size,
1277 dma_addr_t *handle)
1278{
1279 struct page *page;
1280 void *addr;
1281
1282 addr = __alloc_from_pool(size, &page);
1283 if (!addr)
1284 return NULL;
1285
1286 *handle = __iommu_create_mapping(dev, &page, size);
1287 if (*handle == DMA_ERROR_CODE)
1288 goto err_mapping;
1289
1290 return addr;
1291
1292err_mapping:
1293 __free_from_pool(addr, size);
1294 return NULL;
1295}
1296
1297static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
1298 dma_addr_t handle, size_t size)
1299{
1300 __iommu_remove_mapping(dev, handle, size);
1301 __free_from_pool(cpu_addr, size);
1302}
1303
1304static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
1305 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
1306{
1307 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
1308 struct page **pages;
1309 void *addr = NULL;
1310
1311 *handle = DMA_ERROR_CODE;
1312 size = PAGE_ALIGN(size);
1313
1314 if (gfp & GFP_ATOMIC)
1315 return __iommu_alloc_atomic(dev, size, handle);
1316
1317 pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
1318 if (!pages)
1319 return NULL;
1320
1321 *handle = __iommu_create_mapping(dev, pages, size);
1322 if (*handle == DMA_ERROR_CODE)
1323 goto err_buffer;
1324
1325 if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
1326 return pages;
1327
1328 addr = __iommu_alloc_remap(pages, size, gfp, prot,
1329 __builtin_return_address(0));
1330 if (!addr)
1331 goto err_mapping;
1332
1333 return addr;
1334
1335err_mapping:
1336 __iommu_remove_mapping(dev, *handle, size);
1337err_buffer:
1338 __iommu_free_buffer(dev, pages, size, attrs);
1339 return NULL;
1340}
1341
1342static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
1343 void *cpu_addr, dma_addr_t dma_addr, size_t size,
1344 struct dma_attrs *attrs)
1345{
1346 unsigned long uaddr = vma->vm_start;
1347 unsigned long usize = vma->vm_end - vma->vm_start;
1348 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1349
1350 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
1351
1352 if (!pages)
1353 return -ENXIO;
1354
1355 do {
1356 int ret = vm_insert_page(vma, uaddr, *pages++);
1357 if (ret) {
1358 pr_err("Remapping memory failed: %d\n", ret);
1359 return ret;
1360 }
1361 uaddr += PAGE_SIZE;
1362 usize -= PAGE_SIZE;
1363 } while (usize > 0);
1364
1365 return 0;
1366}

/*
 * Free a buffer as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
1372void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
1373 dma_addr_t handle, struct dma_attrs *attrs)
1374{
1375 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1376 size = PAGE_ALIGN(size);
1377
1378 if (!pages) {
1379 WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
1380 return;
1381 }
1382
1383 if (__in_atomic_pool(cpu_addr, size)) {
1384 __iommu_free_atomic(dev, cpu_addr, handle, size);
1385 return;
1386 }
1387
1388 if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
1389 unmap_kernel_range((unsigned long)cpu_addr, size);
1390 vunmap(cpu_addr);
1391 }
1392
1393 __iommu_remove_mapping(dev, handle, size);
1394 __iommu_free_buffer(dev, pages, size, attrs);
1395}
1396
1397static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
1398 void *cpu_addr, dma_addr_t dma_addr,
1399 size_t size, struct dma_attrs *attrs)
1400{
1401 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
1402 struct page **pages = __iommu_get_pages(cpu_addr, attrs);
1403
1404 if (!pages)
1405 return -ENXIO;
1406
1407 return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
1408 GFP_KERNEL);
1409}

/*
 * Map a part of the scatter-gather list into contiguous IO address space.
 */
1414static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
1415 size_t size, dma_addr_t *handle,
1416 enum dma_data_direction dir, struct dma_attrs *attrs,
1417 bool is_coherent)
1418{
1419 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1420 dma_addr_t iova, iova_base;
1421 int ret = 0;
1422 unsigned int count;
1423 struct scatterlist *s;
1424
1425 size = PAGE_ALIGN(size);
1426 *handle = DMA_ERROR_CODE;
1427
1428 iova_base = iova = __alloc_iova(mapping, size);
1429 if (iova == DMA_ERROR_CODE)
1430 return -ENOMEM;
1431
1432 for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
1433 phys_addr_t phys = page_to_phys(sg_page(s));
1434 unsigned int len = PAGE_ALIGN(s->offset + s->length);
1435
1436 if (!is_coherent &&
1437 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1438 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1439
1440 ret = iommu_map(mapping->domain, iova, phys, len, 0);
1441 if (ret < 0)
1442 goto fail;
1443 count += len >> PAGE_SHIFT;
1444 iova += len;
1445 }
1446 *handle = iova_base;
1447
1448 return 0;
1449fail:
1450 iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
1451 __free_iova(mapping, iova_base, size);
1452 return ret;
1453}
1454
1455static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
1456 enum dma_data_direction dir, struct dma_attrs *attrs,
1457 bool is_coherent)
1458{
1459 struct scatterlist *s = sg, *dma = sg, *start = sg;
1460 int i, count = 0;
1461 unsigned int offset = s->offset;
1462 unsigned int size = s->offset + s->length;
1463 unsigned int max = dma_get_max_seg_size(dev);
1464
1465 for (i = 1; i < nents; i++) {
1466 s = sg_next(s);
1467
1468 s->dma_address = DMA_ERROR_CODE;
1469 s->dma_length = 0;
1470
1471 if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
1472 if (__map_sg_chunk(dev, start, size, &dma->dma_address,
1473 dir, attrs, is_coherent) < 0)
1474 goto bad_mapping;
1475
1476 dma->dma_address += offset;
1477 dma->dma_length = size - offset;
1478
1479 size = offset = s->offset;
1480 start = s;
1481 dma = sg_next(dma);
1482 count += 1;
1483 }
1484 size += s->length;
1485 }
1486 if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
1487 is_coherent) < 0)
1488 goto bad_mapping;
1489
1490 dma->dma_address += offset;
1491 dma->dma_length = size - offset;
1492
1493 return count+1;
1494
1495bad_mapping:
1496 for_each_sg(sg, s, count, i)
1497 __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
1498 return 0;
1499}
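
/*
 * Illustrative example of the merging above: three page-aligned entries
 * of 4K, 4K and 8K that fit within dma_get_max_seg_size() are mapped as
 * a single 16K IOVA chunk and reported back as one merged segment; a
 * non-zero offset or a chunk that would grow past the limit starts a new
 * chunk instead.
 */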

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA.  The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length.  They
 * are obtained via sg_dma_{address,length}.
 */
1513int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1514 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1515{
1516 return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
1517}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length.  They are obtained
 * via sg_dma_{address,length}.
 */
1531int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
1532 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1533{
1534 return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
1535}
1536
1537static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1538 int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
1539 bool is_coherent)
1540{
1541 struct scatterlist *s;
1542 int i;
1543
1544 for_each_sg(sg, s, nents, i) {
1545 if (sg_dma_len(s))
1546 __iommu_remove_mapping(dev, sg_dma_address(s),
1547 sg_dma_len(s));
1548 if (!is_coherent &&
1549 !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1550 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1551 s->length, dir);
1552 }
1553}

/*
 * arm_coherent_iommu_unmap_sg - unmap a scatterlist previously mapped with
 * arm_coherent_iommu_map_sg(); no CPU cache maintenance is performed for
 * the coherent variant.
 */
1565void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
1566 int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
1567{
1568 __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
1569}

/*
 * arm_iommu_unmap_sg - unmap a scatterlist previously mapped with
 * arm_iommu_map_sg(), performing the dev-to-cpu cache maintenance for each
 * entry unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */
1581void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1582 enum dma_data_direction dir, struct dma_attrs *attrs)
1583{
1584 __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
1585}

/*
 * arm_iommu_sync_sg_for_cpu - hand the buffers in the scatterlist back to
 * the CPU, performing the necessary cache maintenance for each entry.
 */
1594void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1595 int nents, enum dma_data_direction dir)
1596{
1597 struct scatterlist *s;
1598 int i;
1599
1600 for_each_sg(sg, s, nents, i)
1601 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1602
1603}

/*
 * arm_iommu_sync_sg_for_device - hand the buffers in the scatterlist to
 * the device, performing the necessary cache maintenance for each entry.
 */
1612void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1613 int nents, enum dma_data_direction dir)
1614{
1615 struct scatterlist *s;
1616 int i;
1617
1618 for_each_sg(sg, s, nents, i)
1619 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1620}

/*
 * arm_coherent_iommu_map_page - coherent IOMMU aware version of
 * arm_dma_map_page(): allocates an IO virtual address range for the page
 * and maps it in the device's IOMMU domain, without CPU cache maintenance.
 */
1633static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
1634 unsigned long offset, size_t size, enum dma_data_direction dir,
1635 struct dma_attrs *attrs)
1636{
1637 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1638 dma_addr_t dma_addr;
1639 int ret, len = PAGE_ALIGN(size + offset);
1640
1641 dma_addr = __alloc_iova(mapping, len);
1642 if (dma_addr == DMA_ERROR_CODE)
1643 return dma_addr;
1644
1645 ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
1646 if (ret < 0)
1647 goto fail;
1648
1649 return dma_addr + offset;
1650fail:
1651 __free_iova(mapping, dma_addr, len);
1652 return DMA_ERROR_CODE;
1653}

/*
 * arm_iommu_map_page - IOMMU aware version of arm_dma_map_page(): performs
 * the cpu-to-dev cache maintenance (unless DMA_ATTR_SKIP_CPU_SYNC is set)
 * and then maps the page through the coherent variant above.
 */
1665static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
1666 unsigned long offset, size_t size, enum dma_data_direction dir,
1667 struct dma_attrs *attrs)
1668{
1669 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1670 __dma_page_cpu_to_dev(page, offset, size, dir);
1671
1672 return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
1673}

/*
 * arm_coherent_iommu_unmap_page - coherent IOMMU aware version of
 * arm_dma_unmap_page(): tears down the IOMMU mapping and releases the IO
 * virtual address range, without CPU cache maintenance.
 */
1684static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1685 size_t size, enum dma_data_direction dir,
1686 struct dma_attrs *attrs)
1687{
1688 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1689 dma_addr_t iova = handle & PAGE_MASK;
1690 int offset = handle & ~PAGE_MASK;
1691 int len = PAGE_ALIGN(size + offset);
1692
1693 if (!iova)
1694 return;
1695
1696 iommu_unmap(mapping->domain, iova, len);
1697 __free_iova(mapping, iova, len);
1698}

/*
 * arm_iommu_unmap_page - IOMMU aware version of arm_dma_unmap_page():
 * performs the dev-to-cpu cache maintenance (unless DMA_ATTR_SKIP_CPU_SYNC
 * is set) before tearing down the IOMMU mapping.
 */
1709static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
1710 size_t size, enum dma_data_direction dir,
1711 struct dma_attrs *attrs)
1712{
1713 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1714 dma_addr_t iova = handle & PAGE_MASK;
1715 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1716 int offset = handle & ~PAGE_MASK;
1717 int len = PAGE_ALIGN(size + offset);
1718
1719 if (!iova)
1720 return;
1721
1722 if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
1723 __dma_page_dev_to_cpu(page, offset, size, dir);
1724
1725 iommu_unmap(mapping->domain, iova, len);
1726 __free_iova(mapping, iova, len);
1727}
1728
1729static void arm_iommu_sync_single_for_cpu(struct device *dev,
1730 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1731{
1732 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1733 dma_addr_t iova = handle & PAGE_MASK;
1734 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1735 unsigned int offset = handle & ~PAGE_MASK;
1736
1737 if (!iova)
1738 return;
1739
1740 __dma_page_dev_to_cpu(page, offset, size, dir);
1741}
1742
1743static void arm_iommu_sync_single_for_device(struct device *dev,
1744 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1745{
1746 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1747 dma_addr_t iova = handle & PAGE_MASK;
1748 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1749 unsigned int offset = handle & ~PAGE_MASK;
1750
1751 if (!iova)
1752 return;
1753
1754 __dma_page_cpu_to_dev(page, offset, size, dir);
1755}
1756
1757struct dma_map_ops iommu_ops = {
1758 .alloc = arm_iommu_alloc_attrs,
1759 .free = arm_iommu_free_attrs,
1760 .mmap = arm_iommu_mmap_attrs,
1761 .get_sgtable = arm_iommu_get_sgtable,
1762
1763 .map_page = arm_iommu_map_page,
1764 .unmap_page = arm_iommu_unmap_page,
1765 .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
1766 .sync_single_for_device = arm_iommu_sync_single_for_device,
1767
1768 .map_sg = arm_iommu_map_sg,
1769 .unmap_sg = arm_iommu_unmap_sg,
1770 .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
1771 .sync_sg_for_device = arm_iommu_sync_sg_for_device,
1772
1773 .set_dma_mask = arm_dma_set_mask,
1774};
1775
1776struct dma_map_ops iommu_coherent_ops = {
1777 .alloc = arm_iommu_alloc_attrs,
1778 .free = arm_iommu_free_attrs,
1779 .mmap = arm_iommu_mmap_attrs,
1780 .get_sgtable = arm_iommu_get_sgtable,
1781
1782 .map_page = arm_coherent_iommu_map_page,
1783 .unmap_page = arm_coherent_iommu_unmap_page,
1784
1785 .map_sg = arm_coherent_iommu_map_sg,
1786 .unmap_sg = arm_coherent_iommu_unmap_sg,
1787
1788 .set_dma_mask = arm_dma_set_mask,
1789};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device needs to be attached to the mapping with
 * arm_iommu_attach_device().
 */
1805struct dma_iommu_mapping *
1806arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
1807 int order)
1808{
1809 unsigned int count = size >> (PAGE_SHIFT + order);
1810 unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
1811 struct dma_iommu_mapping *mapping;
1812 int err = -ENOMEM;
1813
1814 if (!count)
1815 return ERR_PTR(-EINVAL);
1816
1817 mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
1818 if (!mapping)
1819 goto err;
1820
1821 mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
1822 if (!mapping->bitmap)
1823 goto err2;
1824
1825 mapping->base = base;
1826 mapping->bits = BITS_PER_BYTE * bitmap_size;
1827 mapping->order = order;
1828 spin_lock_init(&mapping->lock);
1829
1830 mapping->domain = iommu_domain_alloc(bus);
1831 if (!mapping->domain)
1832 goto err3;
1833
1834 kref_init(&mapping->kref);
1835 return mapping;
1836err3:
1837 kfree(mapping->bitmap);
1838err2:
1839 kfree(mapping);
1840err:
1841 return ERR_PTR(err);
1842}
1843EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
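
/*
 * Illustrative usage (not from this file): a bus or platform driver would
 * typically do something like
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x80000000, SZ_128M, 0);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 *	err = arm_iommu_attach_device(dev, mapping);
 *
 * after which the device's dma_map_ops point at iommu_ops and all DMA API
 * calls for it are translated through the IOMMU domain.  The base address
 * and size above are made-up example values.
 */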
1844
1845static void release_iommu_mapping(struct kref *kref)
1846{
1847 struct dma_iommu_mapping *mapping =
1848 container_of(kref, struct dma_iommu_mapping, kref);
1849
1850 iommu_domain_free(mapping->domain);
1851 kfree(mapping->bitmap);
1852 kfree(mapping);
1853}
1854
1855void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1856{
1857 if (mapping)
1858 kref_put(&mapping->kref, release_iommu_mapping);
1859}
1860EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches the specified io address space mapping to the provided device,
 * replacing its dma operations (dma_map_ops pointer) with the IOMMU aware
 * version.  More than one client may be attached to the same io address
 * space mapping.
 */
1873int arm_iommu_attach_device(struct device *dev,
1874 struct dma_iommu_mapping *mapping)
1875{
1876 int err;
1877
1878 err = iommu_attach_device(mapping->domain, dev);
1879 if (err)
1880 return err;
1881
1882 kref_get(&mapping->kref);
1883 dev->archdata.mapping = mapping;
1884 set_dma_ops(dev, &iommu_ops);
1885
1886 pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
1887 return 0;
1888}
1889EXPORT_SYMBOL_GPL(arm_iommu_attach_device);

/**
 * arm_iommu_detach_device
 * @dev: valid struct device pointer
 *
 * Detaches the provided device from a previously attached mapping and
 * voids its dma operations (dma_map_ops pointer).
 */
1898void arm_iommu_detach_device(struct device *dev)
1899{
1900 struct dma_iommu_mapping *mapping;
1901
1902 mapping = to_dma_iommu_mapping(dev);
1903 if (!mapping) {
1904 dev_warn(dev, "Not attached\n");
1905 return;
1906 }
1907
1908 iommu_detach_device(mapping->domain, dev);
1909 kref_put(&mapping->kref, release_iommu_mapping);
	dev->archdata.mapping = NULL;
1911 set_dma_ops(dev, NULL);
1912
1913 pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
1914}
1915EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
1916
1917#endif
1918