/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could use this.  We expect the IOMMU to
 * support the IOMMU API and have few to no restrictions around the IOVA
 * range that can be mapped.  The Type1 IOMMU is currently optimized for
 * relatively static mappings of a userspace process with userspace pages
 * pinned into memory.  We also assume devices and IOMMU domains are
 * static across the course of our ioctl interface.
 */
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>

#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
 allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
 disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
 "Disable VFIO IOMMU support for IOMMU hugepages.");
60
struct vfio_iommu {
 struct list_head domain_list;
 struct vfio_domain *external_domain; /* domain for external user */
 struct mutex lock;
 struct rb_root dma_list;
 struct blocking_notifier_head notifier;
 bool v2;
 bool nesting;
};

struct vfio_domain {
 struct iommu_domain *domain;
 struct list_head next;
 struct list_head group_list;
 int prot; /* IOMMU_CACHE */
 bool fgsp; /* Fine-grained super pages */
};

struct vfio_dma {
 struct rb_node node;
 dma_addr_t iova; /* Device address */
 unsigned long vaddr; /* Process virtual addr */
 size_t size; /* Map size (bytes) */
 int prot; /* IOMMU_READ/IOMMU_WRITE */
 bool iommu_mapped;
 struct task_struct *task;
 struct rb_root pfn_list; /* Externally pinned pfn list */
};

struct vfio_group {
 struct iommu_group *iommu_group;
 struct list_head next;
};

/*
 * Guest RAM pinning working set or DMA target
 */
struct vfio_pfn {
 struct rb_node node;
 dma_addr_t iova; /* Device address */
 unsigned long pfn; /* Host pfn */
 atomic_t ref_count;
};

#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
 (!list_empty(&iommu->domain_list))

static int put_pfn(unsigned long pfn, int prot);

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
115static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
116 dma_addr_t start, size_t size)
117{
118 struct rb_node *node = iommu->dma_list.rb_node;
119
120 while (node) {
121 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);
122
123 if (start + size <= dma->iova)
124 node = node->rb_left;
125 else if (start >= dma->iova + dma->size)
126 node = node->rb_right;
127 else
128 return dma;
129 }
130
131 return NULL;
132}
133
134static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
135{
136 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
137 struct vfio_dma *dma;
138
139 while (*link) {
140 parent = *link;
141 dma = rb_entry(parent, struct vfio_dma, node);
142
143 if (new->iova + new->size <= dma->iova)
144 link = &(*link)->rb_left;
145 else
146 link = &(*link)->rb_right;
147 }
148
149 rb_link_node(&new->node, parent, link);
150 rb_insert_color(&new->node, &iommu->dma_list);
151}
152
153static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
154{
155 rb_erase(&old->node, &iommu->dma_list);
156}
157
/*
 * Helper functions for the host iova-to-pfn list kept per vfio_dma
 */
161static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
162{
163 struct vfio_pfn *vpfn;
164 struct rb_node *node = dma->pfn_list.rb_node;
165
166 while (node) {
167 vpfn = rb_entry(node, struct vfio_pfn, node);
168
169 if (iova < vpfn->iova)
170 node = node->rb_left;
171 else if (iova > vpfn->iova)
172 node = node->rb_right;
173 else
174 return vpfn;
175 }
176 return NULL;
177}
178
179static void vfio_link_pfn(struct vfio_dma *dma,
180 struct vfio_pfn *new)
181{
182 struct rb_node **link, *parent = NULL;
183 struct vfio_pfn *vpfn;
184
185 link = &dma->pfn_list.rb_node;
186 while (*link) {
187 parent = *link;
188 vpfn = rb_entry(parent, struct vfio_pfn, node);
189
190 if (new->iova < vpfn->iova)
191 link = &(*link)->rb_left;
192 else
193 link = &(*link)->rb_right;
194 }
195
196 rb_link_node(&new->node, parent, link);
197 rb_insert_color(&new->node, &dma->pfn_list);
198}
199
200static void vfio_unlink_pfn(struct vfio_dma *dma, struct vfio_pfn *old)
201{
202 rb_erase(&old->node, &dma->pfn_list);
203}
204
205static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
206 unsigned long pfn)
207{
208 struct vfio_pfn *vpfn;
209
210 vpfn = kzalloc(sizeof(*vpfn), GFP_KERNEL);
211 if (!vpfn)
212 return -ENOMEM;
213
214 vpfn->iova = iova;
215 vpfn->pfn = pfn;
216 atomic_set(&vpfn->ref_count, 1);
217 vfio_link_pfn(dma, vpfn);
218 return 0;
219}
220
221static void vfio_remove_from_pfn_list(struct vfio_dma *dma,
222 struct vfio_pfn *vpfn)
223{
224 vfio_unlink_pfn(dma, vpfn);
225 kfree(vpfn);
226}
227
228static struct vfio_pfn *vfio_iova_get_vfio_pfn(struct vfio_dma *dma,
229 unsigned long iova)
230{
231 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
232
233 if (vpfn)
234 atomic_inc(&vpfn->ref_count);
235 return vpfn;
236}
237
238static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
239{
240 int ret = 0;
241
242 if (atomic_dec_and_test(&vpfn->ref_count)) {
243 ret = put_pfn(vpfn->pfn, dma->prot);
244 vfio_remove_from_pfn_list(dma, vpfn);
245 }
246 return ret;
247}
248
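/*
 * Account "npage" pinned pages against the mm of "task", enforcing
 * RLIMIT_MEMLOCK unless the caller passes lock_cap (or, when lock_cap is
 * NULL, the task itself holds CAP_IPC_LOCK).  npage may be negative to
 * undo accounting.
 */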
249static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
250{
251 struct mm_struct *mm;
252 bool is_current;
253 int ret;
254
255 if (!npage)
256 return 0;
257
258 is_current = (task->mm == current->mm);
259
260 mm = is_current ? task->mm : get_task_mm(task);
261 if (!mm)
262 return -ESRCH;
263
264 ret = down_write_killable(&mm->mmap_sem);
265 if (!ret) {
266 if (npage > 0) {
267 if (lock_cap ? !*lock_cap :
268 !has_capability(task, CAP_IPC_LOCK)) {
269 unsigned long limit;
270
271 limit = task_rlimit(task,
272 RLIMIT_MEMLOCK) >> PAGE_SHIFT;
273
274 if (mm->locked_vm + npage > limit)
275 ret = -ENOMEM;
276 }
277 }
278
279 if (!ret)
280 mm->locked_vm += npage;
281
282 up_write(&mm->mmap_sem);
283 }
284
285 if (!is_current)
286 mmput(mm);
287
288 return ret;
289}
290
/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
296static bool is_invalid_reserved_pfn(unsigned long pfn)
297{
298 if (pfn_valid(pfn)) {
299 bool reserved;
300 struct page *tail = pfn_to_page(pfn);
301 struct page *head = compound_head(tail);
302 reserved = !!(PageReserved(head));
303 if (head != tail) {
 /*
  * "head" is not a dangling pointer (compound_head takes care
  * of that), but the hugepage may have been split from under
  * us, and we may not hold a reference count on the head page,
  * so it can be reused before we run PageReserved on it.  We
  * therefore have to check PageTail before trusting what we
  * just read from the head page.
  */
314 smp_rmb();
315 if (PageTail(tail))
316 return reserved;
317 }
318 return PageReserved(tail);
319 }
320
321 return true;
322}
323
324static int put_pfn(unsigned long pfn, int prot)
325{
326 if (!is_invalid_reserved_pfn(pfn)) {
327 struct page *page = pfn_to_page(pfn);
328 if (prot & IOMMU_WRITE)
329 SetPageDirty(page);
330 put_page(page);
331 return 1;
332 }
333 return 0;
334}
335
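/*
 * Translate a single user virtual address into a host pfn, either by
 * pinning the backing page with get_user_pages or, for VM_PFNMAP vmas
 * (e.g. MMIO mappings), by computing the pfn directly from the vma.
 */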
336static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
337 int prot, unsigned long *pfn)
338{
339 struct page *page[1];
340 struct vm_area_struct *vma;
341 int ret;
342
343 if (mm == current->mm) {
344 ret = get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE),
345 page);
346 } else {
347 unsigned int flags = 0;
348
349 if (prot & IOMMU_WRITE)
350 flags |= FOLL_WRITE;
351
352 down_read(&mm->mmap_sem);
353 ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
354 NULL, NULL);
355 up_read(&mm->mmap_sem);
356 }
357
358 if (ret == 1) {
359 *pfn = page_to_pfn(page[0]);
360 return 0;
361 }
362
363 down_read(&mm->mmap_sem);
364
365 vma = find_vma_intersection(mm, vaddr, vaddr + 1);
366
367 if (vma && vma->vm_flags & VM_PFNMAP) {
368 *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
369 if (is_invalid_reserved_pfn(*pfn))
370 ret = 0;
371 }
372
373 up_read(&mm->mmap_sem);
374 return ret;
375}
376
/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
382static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
383 long npage, unsigned long *pfn_base,
384 bool lock_cap, unsigned long limit)
385{
386 unsigned long pfn = 0;
387 long ret, pinned = 0, lock_acct = 0;
388 bool rsvd;
389 dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
390
 /* This code path is only user initiated for self owned memory */
392 if (!current->mm)
393 return -ENODEV;
394
395 ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);
396 if (ret)
397 return ret;
398
399 pinned++;
400 rsvd = is_invalid_reserved_pfn(*pfn_base);
401
 /*
  * Reserved pages aren't counted against the user, and externally
  * pinned pages are already counted against the user.
  */
406 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
407 if (!lock_cap && current->mm->locked_vm + 1 > limit) {
408 put_pfn(*pfn_base, dma->prot);
409 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
410 limit << PAGE_SHIFT);
411 return -ENOMEM;
412 }
413 lock_acct++;
414 }
415
416 if (unlikely(disable_hugepages))
417 goto out;
418
 /* Lock all the consecutive pages from pfn_base */
420 for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
421 pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
422 ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
423 if (ret)
424 break;
425
426 if (pfn != *pfn_base + pinned ||
427 rsvd != is_invalid_reserved_pfn(pfn)) {
428 put_pfn(pfn, dma->prot);
429 break;
430 }
431
432 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
433 if (!lock_cap &&
434 current->mm->locked_vm + lock_acct + 1 > limit) {
435 put_pfn(pfn, dma->prot);
436 pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
437 __func__, limit << PAGE_SHIFT);
438 ret = -ENOMEM;
439 goto unpin_out;
440 }
441 lock_acct++;
442 }
443 }
444
445out:
446 ret = vfio_lock_acct(current, lock_acct, &lock_cap);
447
448unpin_out:
449 if (ret) {
450 if (!rsvd) {
451 for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
452 put_pfn(pfn, dma->prot);
453 }
454
455 return ret;
456 }
457
458 return pinned;
459}
460
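/*
 * Unpin a contiguous run of pages previously pinned by
 * vfio_pin_pages_remote(), optionally updating locked page accounting.
 * Pages still pinned externally (present in dma->pfn_list) stay accounted.
 */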
461static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
462 unsigned long pfn, long npage,
463 bool do_accounting)
464{
465 long unlocked = 0, locked = 0;
466 long i;
467
468 for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
469 if (put_pfn(pfn++, dma->prot)) {
470 unlocked++;
471 if (vfio_find_vpfn(dma, iova))
472 locked++;
473 }
474 }
475
476 if (do_accounting)
477 vfio_lock_acct(dma->task, locked - unlocked, NULL);
478
479 return unlocked;
480}
481
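/*
 * Pin a single page on behalf of an external (e.g. mdev) user, using the
 * mm of the task that created the mapping, and charge it against that
 * task's locked memory limit when accounting is requested.
 */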
482static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
483 unsigned long *pfn_base, bool do_accounting)
484{
485 struct mm_struct *mm;
486 int ret;
487
488 mm = get_task_mm(dma->task);
489 if (!mm)
490 return -ENODEV;
491
492 ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
493 if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
494 ret = vfio_lock_acct(dma->task, 1, NULL);
495 if (ret) {
496 put_pfn(*pfn_base, dma->prot);
497 if (ret == -ENOMEM)
498 pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
499 "(%ld) exceeded\n", __func__,
500 dma->task->comm, task_pid_nr(dma->task),
501 task_rlimit(dma->task, RLIMIT_MEMLOCK));
502 }
503 }
504
505 mmput(mm);
506 return ret;
507}
508
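/*
 * Drop one external pin reference on the page backing "iova"; the page is
 * released and un-accounted once its reference count reaches zero.
 */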
509static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
510 bool do_accounting)
511{
512 int unlocked;
513 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
514
515 if (!vpfn)
516 return 0;
517
518 unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
519
520 if (do_accounting)
521 vfio_lock_acct(dma->task, -unlocked, NULL);
522
523 return unlocked;
524}
525
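/*
 * Pin an array of user IOVAs (expressed as page frame indexes) on behalf
 * of an mdev vendor driver and return the corresponding host pfns.
 * Returns the number of pages pinned or a negative errno; on error any
 * pages pinned by this call are released again.
 */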
526static int vfio_iommu_type1_pin_pages(void *iommu_data,
527 unsigned long *user_pfn,
528 int npage, int prot,
529 unsigned long *phys_pfn)
530{
531 struct vfio_iommu *iommu = iommu_data;
532 int i, j, ret;
533 unsigned long remote_vaddr;
534 struct vfio_dma *dma;
535 bool do_accounting;
536
537 if (!iommu || !user_pfn || !phys_pfn)
538 return -EINVAL;
539
 /* Supported for v2 version only */
541 if (!iommu->v2)
542 return -EACCES;
543
544 mutex_lock(&iommu->lock);
545
 /* Fail if there is no external domain or no notifier registered */
547 if ((!iommu->external_domain) || (!iommu->notifier.head)) {
548 ret = -EINVAL;
549 goto pin_done;
550 }
551
 /*
  * If an iommu capable domain exists in the container then all pages
  * are already pinned and accounted.  Accounting is only needed when
  * no iommu capable domain is present.
  */
557 do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
558
559 for (i = 0; i < npage; i++) {
560 dma_addr_t iova;
561 struct vfio_pfn *vpfn;
562
563 iova = user_pfn[i] << PAGE_SHIFT;
564 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
565 if (!dma) {
566 ret = -EINVAL;
567 goto pin_unwind;
568 }
569
570 if ((dma->prot & prot) != prot) {
571 ret = -EPERM;
572 goto pin_unwind;
573 }
574
575 vpfn = vfio_iova_get_vfio_pfn(dma, iova);
576 if (vpfn) {
577 phys_pfn[i] = vpfn->pfn;
578 continue;
579 }
580
581 remote_vaddr = dma->vaddr + iova - dma->iova;
582 ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
583 do_accounting);
584 if (ret)
585 goto pin_unwind;
586
587 ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
588 if (ret) {
589 vfio_unpin_page_external(dma, iova, do_accounting);
590 goto pin_unwind;
591 }
592 }
593
594 ret = i;
595 goto pin_done;
596
597pin_unwind:
598 phys_pfn[i] = 0;
599 for (j = 0; j < i; j++) {
600 dma_addr_t iova;
601
602 iova = user_pfn[j] << PAGE_SHIFT;
603 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
604 vfio_unpin_page_external(dma, iova, do_accounting);
605 phys_pfn[j] = 0;
606 }
607pin_done:
608 mutex_unlock(&iommu->lock);
609 return ret;
610}
611
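/*
 * Unpin pages previously pinned through vfio_iommu_type1_pin_pages().
 * Returns the number of entries processed or -EINVAL.
 */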
612static int vfio_iommu_type1_unpin_pages(void *iommu_data,
613 unsigned long *user_pfn,
614 int npage)
615{
616 struct vfio_iommu *iommu = iommu_data;
617 bool do_accounting;
618 int i;
619
620 if (!iommu || !user_pfn)
621 return -EINVAL;
622
 /* Supported for v2 version only */
624 if (!iommu->v2)
625 return -EACCES;
626
627 mutex_lock(&iommu->lock);
628
629 if (!iommu->external_domain) {
630 mutex_unlock(&iommu->lock);
631 return -EINVAL;
632 }
633
634 do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
635 for (i = 0; i < npage; i++) {
636 struct vfio_dma *dma;
637 dma_addr_t iova;
638
639 iova = user_pfn[i] << PAGE_SHIFT;
640 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
641 if (!dma)
642 goto unpin_exit;
643 vfio_unpin_page_external(dma, iova, do_accounting);
644 }
645
646unpin_exit:
647 mutex_unlock(&iommu->lock);
648 return i > npage ? npage : (i > 0 ? i : -EINVAL);
649}
650
651static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
652 bool do_accounting)
653{
654 dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
655 struct vfio_domain *domain, *d;
656 long unlocked = 0;
657
658 if (!dma->size)
659 return 0;
660
661 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
662 return 0;
663
 /*
  * We use the IOMMU to track the physical addresses, otherwise we'd
  * need a much more complicated tracking system.  Unfortunately that
  * means we need to use one of the iommu domains to figure out the
  * pfns to unpin.  The rest need to be unmapped in advance so we have
  * no iommu translations remaining when the pages are unpinned.
  */
671 domain = d = list_first_entry(&iommu->domain_list,
672 struct vfio_domain, next);
673
674 list_for_each_entry_continue(d, &iommu->domain_list, next) {
675 iommu_unmap(d->domain, dma->iova, dma->size);
676 cond_resched();
677 }
678
679 while (iova < end) {
680 size_t unmapped, len;
681 phys_addr_t phys, next;
682
683 phys = iommu_iova_to_phys(domain->domain, iova);
684 if (WARN_ON(!phys)) {
685 iova += PAGE_SIZE;
686 continue;
687 }
688
 /*
  * To optimize for fewer iommu_unmap() calls, each of which may
  * require hardware cache flushing, try to find the largest
  * contiguous physical memory chunk to unmap before going back
  * to the iommu for the next chunk.
  */
694 for (len = PAGE_SIZE;
695 !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
696 next = iommu_iova_to_phys(domain->domain, iova + len);
697 if (next != phys + len)
698 break;
699 }
700
701 unmapped = iommu_unmap(domain->domain, iova, len);
702 if (WARN_ON(!unmapped))
703 break;
704
705 unlocked += vfio_unpin_pages_remote(dma, iova,
706 phys >> PAGE_SHIFT,
707 unmapped >> PAGE_SHIFT,
708 false);
709 iova += unmapped;
710
711 cond_resched();
712 }
713
714 dma->iommu_mapped = false;
715 if (do_accounting) {
716 vfio_lock_acct(dma->task, -unlocked, NULL);
717 return 0;
718 }
719 return unlocked;
720}
721
722static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
723{
724 vfio_unmap_unpin(iommu, dma, true);
725 vfio_unlink_dma(iommu, dma);
726 put_task_struct(dma->task);
727 kfree(dma);
728}
729
730static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
731{
732 struct vfio_domain *domain;
733 unsigned long bitmap = ULONG_MAX;
734
735 mutex_lock(&iommu->lock);
736 list_for_each_entry(domain, &iommu->domain_list, next)
737 bitmap &= domain->domain->pgsize_bitmap;
738 mutex_unlock(&iommu->lock);
739
 /*
  * In case the IOMMU supports page sizes smaller than PAGE_SIZE
  * we pretend PAGE_SIZE is supported and hide sub-PAGE_SIZE sizes.
  * That way the user is guaranteed to be able to map and unmap at
  * PAGE_SIZE granularity, which is the granularity we pin at.
  */
748 if (bitmap & ~PAGE_MASK) {
749 bitmap &= PAGE_MASK;
750 bitmap |= PAGE_SIZE;
751 }
752
753 return bitmap;
754}
755
756static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
757 struct vfio_iommu_type1_dma_unmap *unmap)
758{
759 uint64_t mask;
760 struct vfio_dma *dma, *dma_last = NULL;
761 size_t unmapped = 0;
762 int ret = 0, retries = 0;
763
764 mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
765
766 if (unmap->iova & mask)
767 return -EINVAL;
768 if (!unmap->size || unmap->size & mask)
769 return -EINVAL;
770 if (unmap->iova + unmap->size < unmap->iova ||
771 unmap->size > SIZE_MAX)
772 return -EINVAL;
773
774 WARN_ON(mask & PAGE_MASK);
775again:
776 mutex_lock(&iommu->lock);
777
 /*
  * vfio-iommu-type1 (v1) - User mappings were coalesced together to
  * avoid tracking individual mappings.  This means that the granularity
  * of the original mapping was lost and the user was allowed to attempt
  * to unmap any range.  Depending on the contiguousness of physical
  * memory and page sizes supported by the IOMMU, arbitrary unmaps may
  * or may not have worked.  We only guaranteed unmap granularity
  * matching the original mapping.
  *
  * The v2 interface is more deterministic: unmaps must cover complete
  * vfio_dma entries.  A request may span multiple mappings, but it may
  * not split one, which the checks below enforce by requiring that the
  * start and end of the range line up with mapping boundaries.  The
  * amount actually unmapped is reported back in unmap->size.
  */
809 if (iommu->v2) {
810 dma = vfio_find_dma(iommu, unmap->iova, 1);
811 if (dma && dma->iova != unmap->iova) {
812 ret = -EINVAL;
813 goto unlock;
814 }
815 dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
816 if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
817 ret = -EINVAL;
818 goto unlock;
819 }
820 }
821
822 while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
823 if (!iommu->v2 && unmap->iova > dma->iova)
824 break;
825
 /*
  * Only a task sharing the address space that created this iova
  * range is allowed to unmap it.
  */
829 if (dma->task->mm != current->mm)
830 break;
831
832 if (!RB_EMPTY_ROOT(&dma->pfn_list)) {
833 struct vfio_iommu_type1_dma_unmap nb_unmap;
834
835 if (dma_last == dma) {
836 BUG_ON(++retries > 10);
837 } else {
838 dma_last = dma;
839 retries = 0;
840 }
841
842 nb_unmap.iova = dma->iova;
843 nb_unmap.size = dma->size;
 /*
  * Drop the iommu lock and notify mdev vendor drivers to unpin
  * any pages they hold in this range; the notifier callback needs
  * the lock in order to unpin.  Once the pfn_list is empty the
  * mapping can be removed, so restart the search after the
  * callbacks have run.
  */
851 mutex_unlock(&iommu->lock);
852 blocking_notifier_call_chain(&iommu->notifier,
853 VFIO_IOMMU_NOTIFY_DMA_UNMAP,
854 &nb_unmap);
855 goto again;
856 }
857 unmapped += dma->size;
858 vfio_remove_dma(iommu, dma);
859 }
860
861unlock:
862 mutex_unlock(&iommu->lock);
863
 /* Report how much was actually unmapped */
865 unmap->size = unmapped;
866
867 return ret;
868}
869
/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
876static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
877 unsigned long pfn, long npage, int prot)
878{
879 long i;
880 int ret = 0;
881
882 for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
883 ret = iommu_map(domain->domain, iova,
884 (phys_addr_t)pfn << PAGE_SHIFT,
885 PAGE_SIZE, prot | domain->prot);
886 if (ret)
887 break;
888 }
889
890 for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
891 iommu_unmap(domain->domain, iova, PAGE_SIZE);
892
893 return ret;
894}
895
896static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
897 unsigned long pfn, long npage, int prot)
898{
899 struct vfio_domain *d;
900 int ret;
901
902 list_for_each_entry(d, &iommu->domain_list, next) {
903 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
904 npage << PAGE_SHIFT, prot | d->prot);
905 if (ret) {
906 if (ret != -EBUSY ||
907 map_try_harder(d, iova, pfn, npage, prot))
908 goto unwind;
909 }
910
911 cond_resched();
912 }
913
914 return 0;
915
916unwind:
917 list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
918 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
919
920 return ret;
921}
922
923static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
924 size_t map_size)
925{
926 dma_addr_t iova = dma->iova;
927 unsigned long vaddr = dma->vaddr;
928 size_t size = map_size;
929 long npage;
930 unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
931 bool lock_cap = capable(CAP_IPC_LOCK);
932 int ret = 0;
933
934 while (size) {
 /* Pin a contiguous chunk of memory */
936 npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
937 size >> PAGE_SHIFT, &pfn,
938 lock_cap, limit);
939 if (npage <= 0) {
940 WARN_ON(!npage);
941 ret = (int)npage;
942 break;
943 }
944
 /* Map the pinned chunk into every IOMMU domain in the container */
946 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
947 dma->prot);
948 if (ret) {
949 vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
950 npage, true);
951 break;
952 }
953
954 size -= npage << PAGE_SHIFT;
955 dma->size += npage << PAGE_SHIFT;
956 }
957
958 dma->iommu_mapped = true;
959
960 if (ret)
961 vfio_remove_dma(iommu, dma);
962
963 return ret;
964}
965
966static int vfio_dma_do_map(struct vfio_iommu *iommu,
967 struct vfio_iommu_type1_dma_map *map)
968{
969 dma_addr_t iova = map->iova;
970 unsigned long vaddr = map->vaddr;
971 size_t size = map->size;
972 int ret = 0, prot = 0;
973 uint64_t mask;
974 struct vfio_dma *dma;
975
 /* Verify that none of our __u64 fields overflow */
977 if (map->size != size || map->vaddr != vaddr || map->iova != iova)
978 return -EINVAL;
979
980 mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;
981
982 WARN_ON(mask & PAGE_MASK);
983
 /* READ/WRITE from device perspective */
985 if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
986 prot |= IOMMU_WRITE;
987 if (map->flags & VFIO_DMA_MAP_FLAG_READ)
988 prot |= IOMMU_READ;
989
990 if (!prot || !size || (size | iova | vaddr) & mask)
991 return -EINVAL;
992
 /* Don't allow IOVA or virtual address wrap */
994 if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
995 return -EINVAL;
996
997 mutex_lock(&iommu->lock);
998
999 if (vfio_find_dma(iommu, iova, size)) {
1000 ret = -EEXIST;
1001 goto out_unlock;
1002 }
1003
1004 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1005 if (!dma) {
1006 ret = -ENOMEM;
1007 goto out_unlock;
1008 }
1009
1010 dma->iova = iova;
1011 dma->vaddr = vaddr;
1012 dma->prot = prot;
1013 get_task_struct(current);
1014 dma->task = current;
1015 dma->pfn_list = RB_ROOT;
1016
 /* Insert zero-sized and grow as we map chunks of it */
1018 vfio_link_dma(iommu, dma);
1019
 /* Don't pin and map if the container has no IOMMU capable domain */
1021 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
1022 dma->size = size;
1023 else
1024 ret = vfio_pin_map_dma(iommu, dma, size);
1025
1026out_unlock:
1027 mutex_unlock(&iommu->lock);
1028 return ret;
1029}
1030
1031static int vfio_bus_type(struct device *dev, void *data)
1032{
1033 struct bus_type **bus = data;
1034
1035 if (*bus && *bus != dev->bus)
1036 return -EINVAL;
1037
1038 *bus = dev->bus;
1039
1040 return 0;
1041}
1042
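/*
 * Replay every existing mapping in the container into a newly attached
 * IOMMU domain, reusing the translations of an existing domain when the
 * pages are already iommu-mapped and pinning them otherwise.
 */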
1043static int vfio_iommu_replay(struct vfio_iommu *iommu,
1044 struct vfio_domain *domain)
1045{
1046 struct vfio_domain *d;
1047 struct rb_node *n;
1048 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1049 bool lock_cap = capable(CAP_IPC_LOCK);
1050 int ret;
1051
 /* Arbitrarily pick the first domain in the list for copies */
1053 d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
1054 n = rb_first(&iommu->dma_list);
1055
1056 for (; n; n = rb_next(n)) {
1057 struct vfio_dma *dma;
1058 dma_addr_t iova;
1059
1060 dma = rb_entry(n, struct vfio_dma, node);
1061 iova = dma->iova;
1062
1063 while (iova < dma->iova + dma->size) {
1064 phys_addr_t phys;
1065 size_t size;
1066
1067 if (dma->iommu_mapped) {
1068 phys_addr_t p;
1069 dma_addr_t i;
1070
1071 phys = iommu_iova_to_phys(d->domain, iova);
1072
1073 if (WARN_ON(!phys)) {
1074 iova += PAGE_SIZE;
1075 continue;
1076 }
1077
1078 size = PAGE_SIZE;
1079 p = phys + size;
1080 i = iova + size;
1081 while (i < dma->iova + dma->size &&
1082 p == iommu_iova_to_phys(d->domain, i)) {
1083 size += PAGE_SIZE;
1084 p += PAGE_SIZE;
1085 i += PAGE_SIZE;
1086 }
1087 } else {
1088 unsigned long pfn;
1089 unsigned long vaddr = dma->vaddr +
1090 (iova - dma->iova);
1091 size_t n = dma->iova + dma->size - iova;
1092 long npage;
1093
1094 npage = vfio_pin_pages_remote(dma, vaddr,
1095 n >> PAGE_SHIFT,
1096 &pfn, lock_cap,
1097 limit);
1098 if (npage <= 0) {
1099 WARN_ON(!npage);
1100 ret = (int)npage;
1101 return ret;
1102 }
1103
1104 phys = pfn << PAGE_SHIFT;
1105 size = npage << PAGE_SHIFT;
1106 }
1107
1108 ret = iommu_map(domain->domain, iova, phys,
1109 size, dma->prot | domain->prot);
1110 if (ret)
1111 return ret;
1112
1113 iova += size;
1114 }
1115 dma->iommu_mapped = true;
1116 }
1117 return 0;
1118}
1119
/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
1130static void vfio_test_domain_fgsp(struct vfio_domain *domain)
1131{
1132 struct page *pages;
1133 int ret, order = get_order(PAGE_SIZE * 2);
1134
1135 pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
1136 if (!pages)
1137 return;
1138
1139 ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
1140 IOMMU_READ | IOMMU_WRITE | domain->prot);
1141 if (!ret) {
1142 size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
1143
1144 if (unmapped == PAGE_SIZE)
1145 iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
1146 else
1147 domain->fgsp = true;
1148 }
1149
1150 __free_pages(pages, order);
1151}
1152
1153static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
1154 struct iommu_group *iommu_group)
1155{
1156 struct vfio_group *g;
1157
1158 list_for_each_entry(g, &domain->group_list, next) {
1159 if (g->iommu_group == iommu_group)
1160 return g;
1161 }
1162
1163 return NULL;
1164}
1165
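/*
 * Report whether the group needs a software-managed MSI mapping and, if
 * so, return the reserved IOVA base where the MSI doorbell should live.
 */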
1166static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
1167{
1168 struct list_head group_resv_regions;
1169 struct iommu_resv_region *region, *next;
1170 bool ret = false;
1171
1172 INIT_LIST_HEAD(&group_resv_regions);
1173 iommu_get_group_resv_regions(group, &group_resv_regions);
1174 list_for_each_entry(region, &group_resv_regions, list) {
 /*
  * The presence of any 'real' MSI regions should take precedence
  * over the software-managed one if the IOMMU driver happens to
  * advertise both types.
  */
1180 if (region->type == IOMMU_RESV_MSI) {
1181 ret = false;
1182 break;
1183 }
1184
1185 if (region->type == IOMMU_RESV_SW_MSI) {
1186 *base = region->start;
1187 ret = true;
1188 }
1189 }
1190 list_for_each_entry_safe(region, next, &group_resv_regions, list)
1191 kfree(region);
1192 return ret;
1193}
1194
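/*
 * Attach an iommu_group to the container.  Mediated device groups without
 * IOMMU backing are tracked in the external domain; all others get a real
 * IOMMU domain, are matched against compatible existing domains where
 * possible, and have the current mappings replayed onto them.
 */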
1195static int vfio_iommu_type1_attach_group(void *iommu_data,
1196 struct iommu_group *iommu_group)
1197{
1198 struct vfio_iommu *iommu = iommu_data;
1199 struct vfio_group *group;
1200 struct vfio_domain *domain, *d;
1201 struct bus_type *bus = NULL, *mdev_bus;
1202 int ret;
1203 bool resv_msi, msi_remap;
1204 phys_addr_t resv_msi_base;
1205
1206 mutex_lock(&iommu->lock);
1207
1208 list_for_each_entry(d, &iommu->domain_list, next) {
1209 if (find_iommu_group(d, iommu_group)) {
1210 mutex_unlock(&iommu->lock);
1211 return -EINVAL;
1212 }
1213 }
1214
1215 if (iommu->external_domain) {
1216 if (find_iommu_group(iommu->external_domain, iommu_group)) {
1217 mutex_unlock(&iommu->lock);
1218 return -EINVAL;
1219 }
1220 }
1221
1222 group = kzalloc(sizeof(*group), GFP_KERNEL);
1223 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
1224 if (!group || !domain) {
1225 ret = -ENOMEM;
1226 goto out_free;
1227 }
1228
1229 group->iommu_group = iommu_group;
1230
 /* Determine bus_type in order to allocate a domain */
1232 ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
1233 if (ret)
1234 goto out_free;
1235
1236 mdev_bus = symbol_get(mdev_bus_type);
1237
1238 if (mdev_bus) {
1239 if ((bus == mdev_bus) && !iommu_present(bus)) {
1240 symbol_put(mdev_bus_type);
1241 if (!iommu->external_domain) {
1242 INIT_LIST_HEAD(&domain->group_list);
1243 iommu->external_domain = domain;
1244 } else
1245 kfree(domain);
1246
1247 list_add(&group->next,
1248 &iommu->external_domain->group_list);
1249 mutex_unlock(&iommu->lock);
1250 return 0;
1251 }
1252 symbol_put(mdev_bus_type);
1253 }
1254
1255 domain->domain = iommu_domain_alloc(bus);
1256 if (!domain->domain) {
1257 ret = -EIO;
1258 goto out_free;
1259 }
1260
1261 if (iommu->nesting) {
1262 int attr = 1;
1263
1264 ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
1265 &attr);
1266 if (ret)
1267 goto out_domain;
1268 }
1269
1270 ret = iommu_attach_group(domain->domain, iommu_group);
1271 if (ret)
1272 goto out_domain;
1273
1274 resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
1275
1276 INIT_LIST_HEAD(&domain->group_list);
1277 list_add(&group->next, &domain->group_list);
1278
1279 msi_remap = irq_domain_check_msi_remap() ||
1280 iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
1281
1282 if (!allow_unsafe_interrupts && !msi_remap) {
1283 pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
1284 __func__);
1285 ret = -EPERM;
1286 goto out_detach;
1287 }
1288
1289 if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
1290 domain->prot |= IOMMU_CACHE;
1291
 /*
  * Try to match an existing compatible domain.  We don't want to
  * preclude an IOMMU driver supporting multiple bus_types and being
  * able to include different bus_types in the same IOMMU domain, so
  * we test whether the domains use the same iommu_ops rather than
  * testing if they're on the same bus_type.
  */
1299 list_for_each_entry(d, &iommu->domain_list, next) {
1300 if (d->domain->ops == domain->domain->ops &&
1301 d->prot == domain->prot) {
1302 iommu_detach_group(domain->domain, iommu_group);
1303 if (!iommu_attach_group(d->domain, iommu_group)) {
1304 list_add(&group->next, &d->group_list);
1305 iommu_domain_free(domain->domain);
1306 kfree(domain);
1307 mutex_unlock(&iommu->lock);
1308 return 0;
1309 }
1310
1311 ret = iommu_attach_group(domain->domain, iommu_group);
1312 if (ret)
1313 goto out_domain;
1314 }
1315 }
1316
1317 vfio_test_domain_fgsp(domain);
1318
 /* Replay existing mappings on the new domain */
1320 ret = vfio_iommu_replay(iommu, domain);
1321 if (ret)
1322 goto out_detach;
1323
1324 if (resv_msi) {
1325 ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
1326 if (ret)
1327 goto out_detach;
1328 }
1329
1330 list_add(&domain->next, &iommu->domain_list);
1331
1332 mutex_unlock(&iommu->lock);
1333
1334 return 0;
1335
1336out_detach:
1337 iommu_detach_group(domain->domain, iommu_group);
1338out_domain:
1339 iommu_domain_free(domain->domain);
1340out_free:
1341 kfree(domain);
1342 kfree(group);
1343 mutex_unlock(&iommu->lock);
1344 return ret;
1345}
1346
1347static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
1348{
1349 struct rb_node *node;
1350
1351 while ((node = rb_first(&iommu->dma_list)))
1352 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
1353}
1354
1355static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
1356{
1357 struct rb_node *n, *p;
1358
1359 n = rb_first(&iommu->dma_list);
1360 for (; n; n = rb_next(n)) {
1361 struct vfio_dma *dma;
1362 long locked = 0, unlocked = 0;
1363
1364 dma = rb_entry(n, struct vfio_dma, node);
1365 unlocked += vfio_unmap_unpin(iommu, dma, false);
1366 p = rb_first(&dma->pfn_list);
1367 for (; p; p = rb_next(p)) {
1368 struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn,
1369 node);
1370
1371 if (!is_invalid_reserved_pfn(vpfn->pfn))
1372 locked++;
1373 }
1374 vfio_lock_acct(dma->task, locked - unlocked, NULL);
1375 }
1376}
1377
1378static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
1379{
1380 struct rb_node *n;
1381
1382 n = rb_first(&iommu->dma_list);
1383 for (; n; n = rb_next(n)) {
1384 struct vfio_dma *dma;
1385
1386 dma = rb_entry(n, struct vfio_dma, node);
1387
1388 if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
1389 break;
1390 }
1391
1392 WARN_ON(iommu->notifier.head);
1393}
1394
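/*
 * Detach an iommu_group from the container, tearing down the domain and,
 * for the last IOMMU-backed domain, the mappings or their accounting.
 */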
1395static void vfio_iommu_type1_detach_group(void *iommu_data,
1396 struct iommu_group *iommu_group)
1397{
1398 struct vfio_iommu *iommu = iommu_data;
1399 struct vfio_domain *domain;
1400 struct vfio_group *group;
1401
1402 mutex_lock(&iommu->lock);
1403
1404 if (iommu->external_domain) {
1405 group = find_iommu_group(iommu->external_domain, iommu_group);
1406 if (group) {
1407 list_del(&group->next);
1408 kfree(group);
1409
1410 if (list_empty(&iommu->external_domain->group_list)) {
1411 vfio_sanity_check_pfn_list(iommu);
1412
1413 if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
1414 vfio_iommu_unmap_unpin_all(iommu);
1415
1416 kfree(iommu->external_domain);
1417 iommu->external_domain = NULL;
1418 }
1419 goto detach_group_done;
1420 }
1421 }
1422
1423 list_for_each_entry(domain, &iommu->domain_list, next) {
1424 group = find_iommu_group(domain, iommu_group);
1425 if (!group)
1426 continue;
1427
1428 iommu_detach_group(domain->domain, iommu_group);
1429 list_del(&group->next);
1430 kfree(group);
1431
 /*
  * Group ownership provides privilege; if the group list is empty,
  * the domain goes away.  If it's the last IOMMU-backed domain, the
  * mappings are torn down when no external domain remains, or only
  * re-accounted when one does.
  */
1438 if (list_empty(&domain->group_list)) {
1439 if (list_is_singular(&iommu->domain_list)) {
1440 if (!iommu->external_domain)
1441 vfio_iommu_unmap_unpin_all(iommu);
1442 else
1443 vfio_iommu_unmap_unpin_reaccount(iommu);
1444 }
1445 iommu_domain_free(domain->domain);
1446 list_del(&domain->next);
1447 kfree(domain);
1448 }
1449 break;
1450 }
1451
1452detach_group_done:
1453 mutex_unlock(&iommu->lock);
1454}
1455
1456static void *vfio_iommu_type1_open(unsigned long arg)
1457{
1458 struct vfio_iommu *iommu;
1459
1460 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
1461 if (!iommu)
1462 return ERR_PTR(-ENOMEM);
1463
1464 switch (arg) {
1465 case VFIO_TYPE1_IOMMU:
1466 break;
1467 case VFIO_TYPE1_NESTING_IOMMU:
1468 iommu->nesting = true;
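 /* nesting implies v2; fall through */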
1469 case VFIO_TYPE1v2_IOMMU:
1470 iommu->v2 = true;
1471 break;
1472 default:
1473 kfree(iommu);
1474 return ERR_PTR(-EINVAL);
1475 }
1476
1477 INIT_LIST_HEAD(&iommu->domain_list);
1478 iommu->dma_list = RB_ROOT;
1479 mutex_init(&iommu->lock);
1480 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1481
1482 return iommu;
1483}
1484
1485static void vfio_release_domain(struct vfio_domain *domain, bool external)
1486{
1487 struct vfio_group *group, *group_tmp;
1488
1489 list_for_each_entry_safe(group, group_tmp,
1490 &domain->group_list, next) {
1491 if (!external)
1492 iommu_detach_group(domain->domain, group->iommu_group);
1493 list_del(&group->next);
1494 kfree(group);
1495 }
1496
1497 if (!external)
1498 iommu_domain_free(domain->domain);
1499}
1500
1501static void vfio_iommu_type1_release(void *iommu_data)
1502{
1503 struct vfio_iommu *iommu = iommu_data;
1504 struct vfio_domain *domain, *domain_tmp;
1505
1506 if (iommu->external_domain) {
1507 vfio_release_domain(iommu->external_domain, true);
1508 vfio_sanity_check_pfn_list(iommu);
1509 kfree(iommu->external_domain);
1510 }
1511
1512 vfio_iommu_unmap_unpin_all(iommu);
1513
1514 list_for_each_entry_safe(domain, domain_tmp,
1515 &iommu->domain_list, next) {
1516 vfio_release_domain(domain, false);
1517 list_del(&domain->next);
1518 kfree(domain);
1519 }
1520 kfree(iommu);
1521}
1522
1523static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
1524{
1525 struct vfio_domain *domain;
1526 int ret = 1;
1527
1528 mutex_lock(&iommu->lock);
1529 list_for_each_entry(domain, &iommu->domain_list, next) {
1530 if (!(domain->prot & IOMMU_CACHE)) {
1531 ret = 0;
1532 break;
1533 }
1534 }
1535 mutex_unlock(&iommu->lock);
1536
1537 return ret;
1538}
1539
1540static long vfio_iommu_type1_ioctl(void *iommu_data,
1541 unsigned int cmd, unsigned long arg)
1542{
1543 struct vfio_iommu *iommu = iommu_data;
1544 unsigned long minsz;
1545
1546 if (cmd == VFIO_CHECK_EXTENSION) {
1547 switch (arg) {
1548 case VFIO_TYPE1_IOMMU:
1549 case VFIO_TYPE1v2_IOMMU:
1550 case VFIO_TYPE1_NESTING_IOMMU:
1551 return 1;
1552 case VFIO_DMA_CC_IOMMU:
1553 if (!iommu)
1554 return 0;
1555 return vfio_domains_have_iommu_cache(iommu);
1556 default:
1557 return 0;
1558 }
1559 } else if (cmd == VFIO_IOMMU_GET_INFO) {
1560 struct vfio_iommu_type1_info info;
1561
1562 minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
1563
1564 if (copy_from_user(&info, (void __user *)arg, minsz))
1565 return -EFAULT;
1566
1567 if (info.argsz < minsz)
1568 return -EINVAL;
1569
1570 info.flags = VFIO_IOMMU_INFO_PGSIZES;
1571
1572 info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1573
1574 return copy_to_user((void __user *)arg, &info, minsz) ?
1575 -EFAULT : 0;
1576
1577 } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1578 struct vfio_iommu_type1_dma_map map;
1579 uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
1580 VFIO_DMA_MAP_FLAG_WRITE;
1581
1582 minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
1583
1584 if (copy_from_user(&map, (void __user *)arg, minsz))
1585 return -EFAULT;
1586
1587 if (map.argsz < minsz || map.flags & ~mask)
1588 return -EINVAL;
1589
1590 return vfio_dma_do_map(iommu, &map);
1591
1592 } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
1593 struct vfio_iommu_type1_dma_unmap unmap;
1594 long ret;
1595
1596 minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
1597
1598 if (copy_from_user(&unmap, (void __user *)arg, minsz))
1599 return -EFAULT;
1600
1601 if (unmap.argsz < minsz || unmap.flags)
1602 return -EINVAL;
1603
1604 ret = vfio_dma_do_unmap(iommu, &unmap);
1605 if (ret)
1606 return ret;
1607
1608 return copy_to_user((void __user *)arg, &unmap, minsz) ?
1609 -EFAULT : 0;
1610 }
1611
1612 return -ENOTTY;
1613}
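
/*
 * Example (not part of this driver): a minimal userspace sketch of how the
 * ioctls above are typically driven, assuming a container fd opened from
 * /dev/vfio/vfio with a group already attached and the Type1 v2 extension
 * reported by VFIO_CHECK_EXTENSION.  Error handling is omitted.
 *
 *   void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *   struct vfio_iommu_type1_dma_map map = {
 *           .argsz = sizeof(map),
 *           .flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *           .vaddr = (__u64)(uintptr_t)buf,
 *           .iova  = 0x100000,        // illustrative IOVA
 *           .size  = size,
 *   };
 *   ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
 *
 *   struct vfio_iommu_type1_dma_unmap unmap = {
 *           .argsz = sizeof(unmap),
 *           .iova  = 0x100000,
 *           .size  = size,
 *   };
 *   ioctl(container, VFIO_IOMMU_UNMAP_DMA, &unmap);
 *
 * iova and size must be aligned to a page size advertised by
 * VFIO_IOMMU_GET_INFO (see vfio_pgsize_bitmap() above).
 */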
1614
1615static int vfio_iommu_type1_register_notifier(void *iommu_data,
1616 unsigned long *events,
1617 struct notifier_block *nb)
1618{
1619 struct vfio_iommu *iommu = iommu_data;
1620
 /* Clear known events */
1622 *events &= ~VFIO_IOMMU_NOTIFY_DMA_UNMAP;
1623
 /* Refuse to register if unknown events remain */
1625 if (*events)
1626 return -EINVAL;
1627
1628 return blocking_notifier_chain_register(&iommu->notifier, nb);
1629}
1630
1631static int vfio_iommu_type1_unregister_notifier(void *iommu_data,
1632 struct notifier_block *nb)
1633{
1634 struct vfio_iommu *iommu = iommu_data;
1635
1636 return blocking_notifier_chain_unregister(&iommu->notifier, nb);
1637}
1638
1639static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
1640 .name = "vfio-iommu-type1",
1641 .owner = THIS_MODULE,
1642 .open = vfio_iommu_type1_open,
1643 .release = vfio_iommu_type1_release,
1644 .ioctl = vfio_iommu_type1_ioctl,
1645 .attach_group = vfio_iommu_type1_attach_group,
1646 .detach_group = vfio_iommu_type1_detach_group,
1647 .pin_pages = vfio_iommu_type1_pin_pages,
1648 .unpin_pages = vfio_iommu_type1_unpin_pages,
1649 .register_notifier = vfio_iommu_type1_register_notifier,
1650 .unregister_notifier = vfio_iommu_type1_unregister_notifier,
1651};
1652
1653static int __init vfio_iommu_type1_init(void)
1654{
1655 return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
1656}
1657
1658static void __exit vfio_iommu_type1_cleanup(void)
1659{
1660 vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
1661}
1662
1663module_init(vfio_iommu_type1_init);
1664module_exit(vfio_iommu_type1_cleanup);
1665
1666MODULE_VERSION(DRIVER_VERSION);
1667MODULE_LICENSE("GPL v2");
1668MODULE_AUTHOR(DRIVER_AUTHOR);
1669MODULE_DESCRIPTION(DRIVER_DESC);
1670