/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 */
#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR	0

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting from @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI in the system.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently this covers the
 * GICv3 ITS regions reserved via ACPI/IORT, which may require hardware MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);

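/*
 * Pre-populate the MSI page list for a hardware MSI region: each granule in
 * [start, end) gets a 1:1 (iova == phys) entry, so iommu_dma_get_msi_page()
 * later finds a match and leaves the existing hardware translation alone.
 */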
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return -ENOMEM;

	for (i = 0; i < num_pages; i++) {
		msi_page[i].phys = start;
		msi_page[i].iova = start;
		INIT_LIST_HEAD(&msi_page[i].list);
		list_add(&msi_page[i].list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}

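/*
 * Carve the PCI host bridge's outbound MMIO windows out of the IOVA space,
 * so that a DMA address can never alias a peer device's bus addresses.
 */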
static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

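/*
 * Reserve every IOVA range the IOMMU layer reports for this device, and set
 * up MSI page entries for any hardware-translated MSI regions among them.
 */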
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev))
		iova_reserve_pci_windows(to_pci_dev(dev), iovad);

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * it must be a previously-unused device.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);
	if (!dev)
		return 0;

	return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

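/*
 * IOVA allocation helper: for an MSI cookie this is a trivial linear
 * allocator; for a full DMA cookie it draws from the iova_domain, trying a
 * 32-bit (SAC) address first for PCI devices before falling back to the
 * full @dma_limit.
 */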
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
			       size >> iova_shift(iovad));
}

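/*
 * Tear down a DMA mapping: align the request to IOVA granules, unmap it from
 * the domain, then give the IOVA range back to the allocator.
 */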
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

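/*
 * Allocate a page array for a buffer of @count pages, preferring the largest
 * orders permitted by @order_mask but falling back towards order 0 so the
 * allocation can still succeed under memory fragmentation.
 */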
static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device attached to an
 *	 iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = IOMMU_MAPPING_ERROR;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

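/*
 * Map a physical region of @size bytes and return its DMA address, taking
 * care of any sub-granule offset for a full IOVA cookie. Returns
 * IOMMU_MAPPING_ERROR if no IOVA is available or the mapping fails.
 */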
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	size_t iova_off = 0;
	dma_addr_t iova;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
		iova_off = iova_offset(&cookie->iovad, phys);
		size = iova_align(&cookie->iovad, size + iova_off);
	}

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return IOMMU_MAPPING_ERROR;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return IOMMU_MAPPING_ERROR;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = IOMMU_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here.
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == IOMMU_MAPPING_ERROR;
}

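/*
 * Look up (or lazily create) the MSI page covering @msi_addr. Called under
 * the cookie's msi_lock, hence the GFP_ATOMIC allocation; entries
 * pre-populated by cookie_init_hw_msi_region() are found here as well.
 */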
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = __iommu_dma_map(dev, msi_addr, size, prot);
	if (iommu_dma_mapping_error(dev, iova))
		goto out_free_page;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_page:
	kfree(msi_page);
	return NULL;
}

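/*
 * Rewrite an MSI message so that its doorbell write goes through the IOMMU:
 * the physical doorbell address is replaced by the IOVA of the matching MSI
 * page, preserving the offset within the granule.
 */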
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}