// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/dma-noncoherent.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

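/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc callback
 * when the domain is intended for DMA-API use, e.g. (sketch only, where
 * "my_domain_alloc" is a hypothetical driver callback):
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		...
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		...
 *	}
 */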
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

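/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Base address of IOVA region reserved for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * within the kernel's MSI pages.
 */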
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

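/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */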
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

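/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */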
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	/* HW MSI regions are pre-mapped 1:1, so IOVA == physical address */
	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev, "Failed to reserve IOVA\n");
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If this is the last entry, reserve everything above it too */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;
	/*
	 * The IOMMU driver supporting DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE
	 * implies that ops->flush_iotlb_all must be non-NULL.
	 */
	domain->ops->flush_iotlb_all(domain);
}

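/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */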
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn;
	int attr;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	if (!cookie->fq_domain && !iommu_domain_get_attr(domain,
			DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) {
		cookie->fq_domain = domain;
		init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, NULL);
	}

	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
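/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */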
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (dev->bus_dma_mask)
		dma_limit &= dev->bus_dma_mask;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (cookie->fq_domain)	/* non-strict mode */
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad), 0);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}

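/*
 * Tear down a mapping made by __iommu_dma_map() or iommu_dma_map_sg(): unmap
 * the granule-aligned region covering @dma_addr, sync the IOTLB unless a
 * flush queue is in use, then give the IOVA range back to the allocator.
 */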
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap_fast(domain, dma_addr, size) != size);
	if (!cookie->fq_domain)
		iommu_tlb_sync(domain);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence use __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

static struct page **__iommu_dma_get_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || !area->pages)
		return NULL;
	return area->pages;
}
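/**
 * iommu_dma_alloc_remap - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Mapped virtual address, or NULL on failure.
 */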
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	void *vaddr;

	*dma_handle = DMA_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt.sgl, sg, sgt.orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
			< size)
		goto out_free_sg;

	vaddr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;

	*dma_handle = iova;
	sg_free_table(&sgt);
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, iova, size);
out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

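/**
 * __iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc_remap()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */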
static int __iommu_dma_mmap(struct page **pages, size_t size,
		struct vm_area_struct *vma)
{
	return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_cpu(dev, phys, size, dir);
}

static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	arch_sync_dma_for_device(dev, phys, size, dir);
}

static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
}

static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_is_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		arch_sync_dma_for_device(dev, sg_phys(sg), sg->length, dir);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dma_handle;

	dma_handle = __iommu_dma_map(dev, phys, size, prot);
	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    dma_handle != DMA_MAPPING_ERROR)
		arch_sync_dma_for_device(dev, phys, size, dir);
	return dma_handle;
}

static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
	__iommu_dma_unmap(dev, dma_handle, size);
}

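/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */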
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

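/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */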
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

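/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */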
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly overlap a boundary, so we don't care; or
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot fold neighbouring segments
		 *   into the start of the next one in that case.
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}

static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = __iommu_dma_get_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size, VM_USERMAP);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}

static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}

static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				VM_USERMAP, prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	gfp |= __GFP_ZERO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS))
		return iommu_dma_alloc_remap(dev, size, handle, gfp, attrs);

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		cpu_addr = dma_alloc_from_pool(PAGE_ALIGN(size), &page, gfp);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}

static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages)
			return __iommu_dma_mmap(pages, size, vma);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = __iommu_dma_get_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
};

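/**
 * iommu_setup_dma_ops - Wire up IOMMU-backed DMA ops for a device
 * @dev: Device to set up
 * @dma_base: Base of the usable IOVA space
 * @size: Size of the usable IOVA space
 */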
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		 dev_name(dev));
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_page:
	kfree(msi_page);
	return NULL;
}

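/**
 * iommu_dma_prepare_msi() - Map the MSI page in the IOMMU device
 * @desc: MSI descriptor, will store the MSI page
 * @msi_addr: MSI target address to be mapped
 *
 * Return: 0 on success or negative error code if the mapping failed.
 */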
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	unsigned long flags;

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}

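/**
 * iommu_dma_compose_msi_msg() - Apply translation to an MSI message
 * @desc: MSI descriptor prepared by iommu_dma_prepare_msi()
 * @msg: MSI message containing target physical address
 */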
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}

static int iommu_dma_init(void)
{
	return iova_cache_get();
}
arch_initcall(iommu_dma_init);