/*
 * VFIO - User Level meta-driver
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION	"0.3"
#define DRIVER_AUTHOR	"Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC	"VFIO - User Level meta-driver"

/* Global VFIO state: registered IOMMU backends and known groups. */
static struct vfio {
	struct class			*class;
	struct list_head		iommu_drivers_list;
	struct mutex			iommu_drivers_lock;
	struct list_head		group_list;
	struct idr			group_idr;
	struct mutex			group_lock;
	struct cdev			group_cdev;
	dev_t				group_devt;
	wait_queue_head_t		release_q;
} vfio;

struct vfio_iommu_driver {
	const struct vfio_iommu_driver_ops	*ops;
	struct list_head			vfio_next;
};

/* A container (/dev/vfio/vfio fd) holds one IOMMU backend and its groups. */
struct vfio_container {
	struct kref			kref;
	struct list_head		group_list;
	struct rw_semaphore		group_lock;
	struct vfio_iommu_driver	*iommu_driver;
	void				*iommu_data;
	bool				noiommu;
};

/* Tracks a device unbound from its driver but not yet fully released. */
struct vfio_unbound_dev {
	struct device			*dev;
	struct list_head		unbound_next;
};

/* Per IOMMU group state, exposed to userspace as /dev/vfio/$GROUP. */
struct vfio_group {
	struct kref			kref;
	int				minor;
	atomic_t			container_users;
	struct iommu_group		*iommu_group;
	struct vfio_container		*container;
	struct list_head		device_list;
	struct mutex			device_lock;
	struct device			*dev;
	struct notifier_block		nb;
	struct list_head		vfio_next;
	struct list_head		container_next;
	struct list_head		unbound_list;
	struct mutex			unbound_lock;
	atomic_t			opened;
	bool				noiommu;
};

/* One registered device within a group, backed by a VFIO bus driver. */
struct vfio_device {
	struct kref			kref;
	struct device			*dev;
	const struct vfio_device_ops	*ops;
	struct vfio_group		*group;
	struct list_head		group_next;
	void				*device_data;
};

#ifdef CONFIG_VFIO_NOIOMMU
static bool noiommu __read_mostly;
module_param_named(enable_unsafe_noiommu_mode,
		   noiommu, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. This mode provides no device isolation, no DMA translation, no host kernel protection, cannot be used for device assignment to virtual machines, requires RAWIO permissions, and will taint the kernel. If you do not know what this is for, step away. (default: false)");
#endif
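
/*
 * Illustrative note (not from the original source): because this is a
 * standard module parameter, no-IOMMU mode is typically enabled either at
 * load time,
 *
 *	modprobe vfio enable_unsafe_noiommu_mode=1
 *
 * or at runtime through sysfs,
 *
 *	echo 1 > /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
 *
 * Groups created in this mode appear as /dev/vfio/noiommu-$GROUP (see
 * vfio_create_group() below) and opening them requires CAP_SYS_RAWIO.
 */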

/*
 * IOMMU group access for VFIO bus drivers.  vfio_iommu_group_get() returns
 * the device's IOMMU group and, when no-IOMMU mode is enabled and no real
 * IOMMU is present on the bus, fabricates a "vfio-noiommu" group so the
 * device can still be exposed through VFIO (tainting the kernel).
 */
struct iommu_group *vfio_iommu_group_get(struct device *dev)
{
	struct iommu_group *group;
	int __maybe_unused ret;

	group = iommu_group_get(dev);

#ifdef CONFIG_VFIO_NOIOMMU
	/*
	 * Only create a fake group if the device has no real group, the
	 * user opted in to no-IOMMU mode, and the bus has no IOMMU ops.
	 */
	if (group || !noiommu || iommu_present(dev->bus))
		return group;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return NULL;

	iommu_group_set_name(group, "vfio-noiommu");
	iommu_group_set_iommudata(group, &noiommu, NULL);
	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);
	if (ret)
		return NULL;

	/*
	 * Taint the kernel: without an IOMMU the user can DMA anywhere,
	 * so make the lack of isolation obvious in bug reports.
	 */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);
	dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
#endif

	return group;
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_get);

void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
{
#ifdef CONFIG_VFIO_NOIOMMU
	if (iommu_group_get_iommudata(group) == &noiommu)
		iommu_group_remove_device(dev);
#endif

	iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
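
/*
 * Illustrative sketch (not part of this file): a VFIO bus driver such as
 * vfio-pci pairs vfio_iommu_group_get()/vfio_iommu_group_put() around the
 * lifetime of the vfio_device it registers.  The my_* names below are
 * placeholders; only the vfio_* calls are real:
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct iommu_group *group;
 *		int ret;
 *
 *		group = vfio_iommu_group_get(&pdev->dev);
 *		if (!group)
 *			return -EINVAL;
 *
 *		ret = vfio_add_group_dev(&pdev->dev, &my_vfio_ops, my_data);
 *		if (ret)
 *			vfio_iommu_group_put(group, &pdev->dev);
 *		return ret;
 *	}
 *
 *	static void my_remove(struct pci_dev *pdev)
 *	{
 *		vfio_del_group_dev(&pdev->dev);
 *		vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
 *	}
 */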

#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
	if (arg != VFIO_NOIOMMU_IOMMU)
		return ERR_PTR(-EINVAL);
	if (!capable(CAP_SYS_RAWIO))
		return ERR_PTR(-EPERM);

	return NULL;
}

static void vfio_noiommu_release(void *iommu_data)
{
}

static long vfio_noiommu_ioctl(void *iommu_data,
			       unsigned int cmd, unsigned long arg)
{
	if (cmd == VFIO_CHECK_EXTENSION)
		return noiommu && (arg == VFIO_NOIOMMU_IOMMU) ? 1 : 0;

	return -ENOTTY;
}

static int vfio_noiommu_attach_group(void *iommu_data,
				     struct iommu_group *iommu_group)
{
	return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
}

static void vfio_noiommu_detach_group(void *iommu_data,
				      struct iommu_group *iommu_group)
{
}

static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
	.name = "vfio-noiommu",
	.owner = THIS_MODULE,
	.open = vfio_noiommu_open,
	.release = vfio_noiommu_release,
	.ioctl = vfio_noiommu_ioctl,
	.attach_group = vfio_noiommu_attach_group,
	.detach_group = vfio_noiommu_detach_group,
};
#endif

/*
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver, *tmp;

	driver = kzalloc(sizeof(*driver), GFP_KERNEL);
	if (!driver)
		return -ENOMEM;

	driver->ops = ops;

	mutex_lock(&vfio.iommu_drivers_lock);

	/* Check for duplicates */
	list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
		if (tmp->ops == ops) {
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return -EINVAL;
		}
	}

	list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

	mutex_unlock(&vfio.iommu_drivers_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
	struct vfio_iommu_driver *driver;

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		if (driver->ops == ops) {
			list_del(&driver->vfio_next);
			mutex_unlock(&vfio.iommu_drivers_lock);
			kfree(driver);
			return;
		}
	}
	mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
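
/*
 * Illustrative sketch (not part of this file): an IOMMU backend such as
 * vfio_iommu_type1 registers an ops table like vfio_noiommu_ops above from
 * its module init and unregisters it on exit.  The my_* callbacks are
 * placeholders:
 *
 *	static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *		.name		= "my-iommu-backend",
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.attach_group	= my_attach_group,
 *		.detach_group	= my_detach_group,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vfio_register_iommu_driver(&my_iommu_ops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vfio_unregister_iommu_driver(&my_iommu_ops);
 *	}
 */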

/*
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
	return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
	idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/*
 * Container objects - a container is created when /dev/vfio/vfio is
 * opened, but its lifetime extends until the last user is done, so it
 * is reference counted.  Containers, groups, and devices may be closed
 * in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
	kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
	struct vfio_container *container;
	container = container_of(kref, struct vfio_container, kref);

	kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
	kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
	mutex_unlock(&vfio.group_lock);

	/*
	 * Unregister outside of the lock.  A spurious notifier callback is
	 * harmless now that the group is no longer in vfio.group_list.
	 */
	iommu_group_unregister_notifier(group->iommu_group, &group->nb);
	kfree(group);
}

/*
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
	struct vfio_group *group, *tmp;
	struct device *dev;
	int ret, minor;

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		return ERR_PTR(-ENOMEM);

	kref_init(&group->kref);
	INIT_LIST_HEAD(&group->device_list);
	mutex_init(&group->device_lock);
	INIT_LIST_HEAD(&group->unbound_list);
	mutex_init(&group->unbound_lock);
	atomic_set(&group->container_users, 0);
	atomic_set(&group->opened, 0);
	group->iommu_group = iommu_group;
#ifdef CONFIG_VFIO_NOIOMMU
	group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
#endif

	group->nb.notifier_call = vfio_iommu_group_notifier;

	/*
	 * Register the notifier before taking vfio.group_lock; the release
	 * path (vfio_group_unlock_and_free) likewise only unregisters after
	 * dropping the lock.
	 */
	ret = iommu_group_register_notifier(iommu_group, &group->nb);
	if (ret) {
		kfree(group);
		return ERR_PTR(ret);
	}

	mutex_lock(&vfio.group_lock);

	/* Did we race creating this group? */
	list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
		if (tmp->iommu_group == iommu_group) {
			vfio_group_get(tmp);
			vfio_group_unlock_and_free(group);
			return tmp;
		}
	}

	minor = vfio_alloc_group_minor(group);
	if (minor < 0) {
		vfio_group_unlock_and_free(group);
		return ERR_PTR(minor);
	}

	dev = device_create(vfio.class, NULL,
			    MKDEV(MAJOR(vfio.group_devt), minor),
			    group, "%s%d", group->noiommu ? "noiommu-" : "",
			    iommu_group_id(iommu_group));
	if (IS_ERR(dev)) {
		vfio_free_group_minor(minor);
		vfio_group_unlock_and_free(group);
		return (struct vfio_group *)dev;
	}

	group->minor = minor;
	group->dev = dev;

	list_add(&group->vfio_next, &vfio.group_list);

	mutex_unlock(&vfio.group_lock);

	return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
	struct vfio_group *group = container_of(kref, struct vfio_group, kref);
	struct vfio_unbound_dev *unbound, *tmp;
	struct iommu_group *iommu_group = group->iommu_group;

	WARN_ON(!list_empty(&group->device_list));

	list_for_each_entry_safe(unbound, tmp,
				 &group->unbound_list, unbound_next) {
		list_del(&unbound->unbound_next);
		kfree(unbound);
	}

	device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
	list_del(&group->vfio_next);
	vfio_free_group_minor(group->minor);
	vfio_group_unlock_and_free(group);
	iommu_group_put(iommu_group);
}

static void vfio_group_put(struct vfio_group *group)
{
	kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
	kref_get(&group->kref);
}

/*
 * Not really a "try" - we take the lock and grab a reference - but if the
 * group is no longer on vfio.group_list we return NULL, making this safe
 * to call from the iommu group notifier where the group may be going away.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
	struct vfio_group *target = group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group == target) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	list_for_each_entry(group, &vfio.group_list, vfio_next) {
		if (group->iommu_group == iommu_group) {
			vfio_group_get(group);
			mutex_unlock(&vfio.group_lock);
			return group;
		}
	}
	mutex_unlock(&vfio.group_lock);

	return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
	struct vfio_group *group;

	mutex_lock(&vfio.group_lock);
	group = idr_find(&vfio.group_idr, minor);
	if (!group) {
		mutex_unlock(&vfio.group_lock);
		return NULL;
	}
	vfio_group_get(group);
	mutex_unlock(&vfio.group_lock);

	return group;
}

/*
 * Device objects - create, release, get, put
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
					     struct device *dev,
					     const struct vfio_device_ops *ops,
					     void *device_data)
{
	struct vfio_device *device;

	device = kzalloc(sizeof(*device), GFP_KERNEL);
	if (!device)
		return ERR_PTR(-ENOMEM);

	kref_init(&device->kref);
	device->dev = dev;
	device->group = group;
	device->ops = ops;
	device->device_data = device_data;
	dev_set_drvdata(dev, device);

	/* No need to get group_lock, caller has group reference */
	vfio_group_get(group);

	mutex_lock(&group->device_lock);
	list_add(&device->group_next, &group->device_list);
	mutex_unlock(&group->device_lock);

	return device;
}

static void vfio_device_release(struct kref *kref)
{
	struct vfio_device *device = container_of(kref,
						  struct vfio_device, kref);
	struct vfio_group *group = device->group;

	list_del(&device->group_next);
	mutex_unlock(&group->device_lock);

	dev_set_drvdata(device->dev, NULL);

	kfree(device);

	/* vfio_del_group_dev may be waiting for this device */
	wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
	struct vfio_group *group = device->group;
	kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
	vfio_group_get(device->group);
	kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
						 struct device *dev)
{
	struct vfio_device *device;

	mutex_lock(&group->device_lock);
	list_for_each_entry(device, &group->device_list, group_next) {
		if (device->dev == dev) {
			vfio_device_get(device);
			mutex_unlock(&group->device_lock);
			return device;
		}
	}
	mutex_unlock(&group->device_lock);
	return NULL;
}

/*
 * Drivers in this list, like pci-stub, only prevent other drivers from
 * claiming a device and never do DMA on its behalf, so a group containing
 * devices bound to them can still be considered viable for userspace.
 * PCI interconnect devices (anything without a normal header type, i.e.
 * bridges) are treated the same way in vfio_dev_whitelisted() below.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub" };

static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv)
{
	int i;

	if (dev_is_pci(dev)) {
		struct pci_dev *pdev = to_pci_dev(dev);

		if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
			return true;
	}

	for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
		if (!strcmp(drv->name, vfio_driver_whitelist[i]))
			return true;
	}

	return false;
}

/*
 * A group is viable for userspace if every device in it is either
 * driver-less, bound to a VFIO driver (present on the group's device
 * list), on the unbound_list (transitioning away from a VFIO driver),
 * or bound to a whitelisted driver / a PCI interconnect device.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
	struct vfio_group *group = data;
	struct vfio_device *device;
	struct device_driver *drv = ACCESS_ONCE(dev->driver);
	struct vfio_unbound_dev *unbound;
	int ret = -EINVAL;

	mutex_lock(&group->unbound_lock);
	list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
		if (dev == unbound->dev) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&group->unbound_lock);

	if (!ret || !drv || vfio_dev_whitelisted(dev, drv))
		return 0;

	device = vfio_group_get_device(group, dev);
	if (device) {
		vfio_device_put(device);
		return 0;
	}

	return ret;
}

/*
 * Async device support - IOMMU group change notifications
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	/* Do we already know about it?  We shouldn't */
	device = vfio_group_get_device(group, dev);
	if (WARN_ON_ONCE(device)) {
		vfio_device_put(device);
		return 0;
	}

	/* Nothing to do for idle groups */
	if (!atomic_read(&group->container_users))
		return 0;

	/* Hot-adding a device to a group in use is not supported */
	WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
	     iommu_group_id(group->iommu_group));

	return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
	/* We don't care what happens when the group isn't in use */
	if (!atomic_read(&group->container_users))
		return 0;

	return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct vfio_group *group = container_of(nb, struct vfio_group, nb);
	struct device *dev = data;
	struct vfio_unbound_dev *unbound;

	/*
	 * Need to go through a group_lock lookup to get a reference or we
	 * risk racing a group being removed.  Ignore spurious notifies.
	 */
	group = vfio_group_try_get(group);
	if (!group)
		return NOTIFY_OK;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		vfio_group_nb_add_dev(group, dev);
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		/*
		 * Nothing to do here.  If the device is in use, the VFIO
		 * sub-driver should block the remove callback until it is
		 * unused; if it is unused or bound to a stub driver we
		 * don't care that it is going away.
		 */
		break;
	case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
		pr_debug("%s: Device %s, group %d binding to driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		break;
	case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
		pr_debug("%s: Device %s, group %d bound to driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		BUG_ON(vfio_group_nb_verify(group, dev));
		break;
	case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
		pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group), dev->driver->name);
		break;
	case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
		pr_debug("%s: Device %s, group %d unbound from driver\n",
			 __func__, dev_name(dev),
			 iommu_group_id(group->iommu_group));
		/*
		 * The unbind has completed, so drop any matching entry from
		 * the unbound_list; vfio_dev_viable() no longer needs to
		 * treat this device specially.
		 */
		mutex_lock(&group->unbound_lock);
		list_for_each_entry(unbound,
				    &group->unbound_list, unbound_next) {
			if (dev == unbound->dev) {
				list_del(&unbound->unbound_next);
				kfree(unbound);
				break;
			}
		}
		mutex_unlock(&group->unbound_lock);
		break;
	}

	vfio_group_put(group);
	return NOTIFY_OK;
}

/*
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
		       const struct vfio_device_ops *ops, void *device_data)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return -EINVAL;

	group = vfio_group_get_from_iommu(iommu_group);
	if (!group) {
		group = vfio_create_group(iommu_group);
		if (IS_ERR(group)) {
			iommu_group_put(iommu_group);
			return PTR_ERR(group);
		}
	} else {
		/*
		 * A found vfio_group already holds a reference to the
		 * iommu_group; a created vfio_group keeps the reference.
		 */
		iommu_group_put(iommu_group);
	}

	device = vfio_group_get_device(group, dev);
	if (device) {
		WARN(1, "Device %s already exists on group %d\n",
		     dev_name(dev), iommu_group_id(iommu_group));
		vfio_device_put(device);
		vfio_group_put(group);
		return -EBUSY;
	}

	device = vfio_group_create_device(group, dev, ops, device_data);
	if (IS_ERR(device)) {
		vfio_group_put(group);
		return PTR_ERR(device);
	}

	/*
	 * Drop all but the vfio_device reference.  The vfio_device holds
	 * a reference to the vfio_group, which holds a reference to the
	 * iommu_group.
	 */
	vfio_group_put(group);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
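
/*
 * Illustrative sketch (not part of this file): the vfio_device_ops passed to
 * vfio_add_group_dev() provide the backend for the device file descriptor
 * handed out by VFIO_GROUP_GET_DEVICE_FD; every callback below is invoked
 * from this file with the device_data pointer registered above.  The my_*
 * names are placeholders:
 *
 *	static const struct vfio_device_ops my_vfio_ops = {
 *		.name		= "my-vfio-bus-driver",
 *		.open		= my_open,
 *		.release	= my_release,
 *		.ioctl		= my_ioctl,
 *		.read		= my_read,
 *		.write		= my_write,
 *		.mmap		= my_mmap,
 *		.request	= my_request,
 *	};
 *
 * ->open and ->release bracket the device fd lifetime, ->ioctl, ->read,
 * ->write and ->mmap back the corresponding file operations, and ->request
 * is how vfio_del_group_dev() asks the user to give the device back.
 */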

/*
 * Get a reference to the vfio_device for a device.  Even if the caller
 * thinks they own the device, they could be racing with a release call
 * path, so we can't trust drvdata for the shortcut.  Go the long way
 * around, from the iommu_group to the vfio_group to the vfio_device.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
	struct iommu_group *iommu_group;
	struct vfio_group *group;
	struct vfio_device *device;

	iommu_group = iommu_group_get(dev);
	if (!iommu_group)
		return NULL;

	group = vfio_group_get_from_iommu(iommu_group);
	iommu_group_put(iommu_group);
	if (!group)
		return NULL;

	device = vfio_group_get_device(group, dev);
	vfio_group_put(group);

	return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
						     char *buf)
{
	struct vfio_device *it, *device = NULL;

	mutex_lock(&group->device_lock);
	list_for_each_entry(it, &group->device_list, group_next) {
		if (!strcmp(dev_name(it->dev), buf)) {
			device = it;
			vfio_device_get(device);
			break;
		}
	}
	mutex_unlock(&group->device_lock);

	return device;
}

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
	return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
	struct vfio_device *device;

	device = vfio_group_get_device(group, dev);
	if (!device)
		return false;

	vfio_device_put(device);
	return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device hold references, so
 * a driver's ->request callback lets us ask the user to release the
 * device rather than waiting silently.
 */
void *vfio_del_group_dev(struct device *dev)
{
	struct vfio_device *device = dev_get_drvdata(dev);
	struct vfio_group *group = device->group;
	void *device_data = device->device_data;
	struct vfio_unbound_dev *unbound;
	unsigned int i = 0;
	long ret;
	bool interrupted = false;

	/*
	 * The group exists so long as we have a device reference.  Get
	 * a group reference and use it to scan for the device going away.
	 */
	vfio_group_get(group);

	/*
	 * While the device is being unbound it is no longer on the group's
	 * device_list but may still be bound to its driver, which would
	 * normally make the group non-viable.  Track it on unbound_list so
	 * vfio_dev_viable() keeps accepting it until the unbind completes
	 * (the UNBOUND notifier removes the entry).
	 */
	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
	if (unbound) {
		unbound->dev = dev;
		mutex_lock(&group->unbound_lock);
		list_add(&unbound->unbound_next, &group->unbound_list);
		mutex_unlock(&group->unbound_lock);
	}
	WARN_ON(!unbound);

	vfio_device_put(device);

	/*
	 * If the device is still open, poke the user via the ->request
	 * callback (if present) and wait, retrying every 10 seconds, until
	 * the device reference is released.  An interrupted wait switches
	 * to an uninterruptible timeout so the unbind cannot be blocked
	 * forever by a signal.
	 */
	do {
		device = vfio_group_get_device(group, dev);
		if (!device)
			break;

		if (device->ops->request)
			device->ops->request(device_data, i++);

		vfio_device_put(device);

		if (interrupted) {
			ret = wait_event_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
		} else {
			ret = wait_event_interruptible_timeout(vfio.release_q,
					!vfio_dev_present(group, dev), HZ * 10);
			if (ret == -ERESTARTSYS) {
				interrupted = true;
				dev_warn(dev,
					 "Device is currently in use, task"
					 " \"%s\" (%d) "
					 "blocked until device is released",
					 current->comm, task_pid_nr(current));
			}
		}
	} while (ret <= 0);

	vfio_group_put(group);

	return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);

/*
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
				       unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = 0;

	down_read(&container->group_lock);

	driver = container->iommu_driver;

	switch (arg) {
		/* No base extensions yet */
	default:
		/*
		 * If no IOMMU driver is set yet, poll all registered
		 * drivers and report the first positive result; once a
		 * driver is set, pass the query only to that driver.
		 */
		if (!driver) {
			mutex_lock(&vfio.iommu_drivers_lock);
			list_for_each_entry(driver, &vfio.iommu_drivers_list,
					    vfio_next) {

#ifdef CONFIG_VFIO_NOIOMMU
				if (!list_empty(&container->group_list) &&
				    (container->noiommu !=
				     (driver->ops == &vfio_noiommu_ops)))
					continue;
#endif

				if (!try_module_get(driver->ops->owner))
					continue;

				ret = driver->ops->ioctl(NULL,
							 VFIO_CHECK_EXTENSION,
							 arg);
				module_put(driver->ops->owner);
				if (ret > 0)
					break;
			}
			mutex_unlock(&vfio.iommu_drivers_lock);
		} else
			ret = driver->ops->ioctl(container->iommu_data,
						 VFIO_CHECK_EXTENSION, arg);
	}

	up_read(&container->group_lock);

	return ret;
}

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
					  struct vfio_iommu_driver *driver,
					  void *data)
{
	struct vfio_group *group;
	int ret = -ENODEV;

	list_for_each_entry(group, &container->group_list, container_next) {
		ret = driver->ops->attach_group(data, group->iommu_group);
		if (ret)
			goto unwind;
	}

	return ret;

unwind:
	list_for_each_entry_continue_reverse(group, &container->group_list,
					     container_next) {
		driver->ops->detach_group(data, group->iommu_group);
	}

	return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
				 unsigned long arg)
{
	struct vfio_iommu_driver *driver;
	long ret = -ENODEV;

	down_write(&container->group_lock);

	/*
	 * The container is an unprivileged interface; only by adding a
	 * group does the user gain the privilege of enabling an IOMMU,
	 * which may allocate finite resources.  There is no unset_iommu:
	 * removing all groups deprivileges the container and returns it
	 * to an unset state.  Also refuse to replace an already set
	 * driver.
	 */
	if (list_empty(&container->group_list) || container->iommu_driver) {
		up_write(&container->group_lock);
		return -EINVAL;
	}

	mutex_lock(&vfio.iommu_drivers_lock);
	list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
		void *data;

#ifdef CONFIG_VFIO_NOIOMMU
		/*
		 * Only noiommu containers can use vfio-noiommu and
		 * vice versa.
		 */
		if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
			continue;
#endif

		if (!try_module_get(driver->ops->owner))
			continue;

		/*
		 * The arg magic for SET_IOMMU is the same as
		 * CHECK_EXTENSION, so test which iommu driver reports
		 * support for this extension before calling open on it.
		 * We also pass the magic to open(), allowing a single
		 * driver to support multiple interfaces if it wishes.
		 */
		if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
			module_put(driver->ops->owner);
			continue;
		}

		data = driver->ops->open(arg);
		if (IS_ERR(data)) {
			ret = PTR_ERR(data);
			module_put(driver->ops->owner);
			continue;
		}

		ret = __vfio_container_attach_groups(container, driver, data);
		if (ret) {
			driver->ops->release(data);
			module_put(driver->ops->owner);
			continue;
		}

		container->iommu_driver = driver;
		container->iommu_data = data;
		break;
	}

	mutex_unlock(&vfio.iommu_drivers_lock);
	up_write(&container->group_lock);

	return ret;
}

static long vfio_fops_unl_ioctl(struct file *filep,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	void *data;
	long ret = -EINVAL;

	if (!container)
		return ret;

	switch (cmd) {
	case VFIO_GET_API_VERSION:
		ret = VFIO_API_VERSION;
		break;
	case VFIO_CHECK_EXTENSION:
		ret = vfio_ioctl_check_extension(container, arg);
		break;
	case VFIO_SET_IOMMU:
		ret = vfio_ioctl_set_iommu(container, arg);
		break;
	default:
		down_read(&container->group_lock);

		driver = container->iommu_driver;
		data = container->iommu_data;

		if (driver)
			ret = driver->ops->ioctl(data, cmd, arg);

		up_read(&container->group_lock);
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
				   unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_container *container;

	container = kzalloc(sizeof(*container), GFP_KERNEL);
	if (!container)
		return -ENOMEM;

	INIT_LIST_HEAD(&container->group_list);
	init_rwsem(&container->group_lock);
	kref_init(&container->kref);

	filep->private_data = container;

	return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_container *container = filep->private_data;

	filep->private_data = NULL;

	vfio_container_put(container);

	return 0;
}

/*
 * Once an IOMMU driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->read))
		ret = driver->ops->read(container->iommu_data,
					buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	ssize_t ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->write))
		ret = driver->ops->write(container->iommu_data,
					 buf, count, ppos);

	up_read(&container->group_lock);

	return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_container *container = filep->private_data;
	struct vfio_iommu_driver *driver;
	int ret = -EINVAL;

	down_read(&container->group_lock);

	driver = container->iommu_driver;
	if (likely(driver && driver->ops->mmap))
		ret = driver->ops->mmap(container->iommu_data, vma);

	up_read(&container->group_lock);

	return ret;
}

static const struct file_operations vfio_fops = {
	.owner		= THIS_MODULE,
	.open		= vfio_fops_open,
	.release	= vfio_fops_release,
	.read		= vfio_fops_read,
	.write		= vfio_fops_write,
	.unlocked_ioctl	= vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_fops_compat_ioctl,
#endif
	.mmap		= vfio_fops_mmap,
};

/*
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
	struct vfio_container *container = group->container;
	struct vfio_iommu_driver *driver;

	down_write(&container->group_lock);

	driver = container->iommu_driver;
	if (driver)
		driver->ops->detach_group(container->iommu_data,
					  group->iommu_group);

	group->container = NULL;
	list_del(&group->container_next);

	/* Detaching the last group deprivileges a container, remove iommu */
	if (driver && list_empty(&container->group_list)) {
		driver->ops->release(container->iommu_data);
		module_put(driver->ops->owner);
		container->iommu_driver = NULL;
		container->iommu_data = NULL;
	}

	up_write(&container->group_lock);

	vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that still exists, therefore the only valid
 * transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
	int users = atomic_cmpxchg(&group->container_users, 1, 0);

	if (!users)
		return -EINVAL;
	if (users != 1)
		return -EBUSY;

	__vfio_group_unset_container(group);

	return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file
 * descriptors, the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
	if (0 == atomic_dec_if_positive(&group->container_users))
		__vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
	struct fd f;
	struct vfio_container *container;
	struct vfio_iommu_driver *driver;
	int ret = 0;

	if (atomic_read(&group->container_users))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	f = fdget(container_fd);
	if (!f.file)
		return -EBADF;

	/* Sanity check, is this really our fd? */
	if (f.file->f_op != &vfio_fops) {
		fdput(f);
		return -EINVAL;
	}

	container = f.file->private_data;
	WARN_ON(!container); /* fget ensures we don't race vfio_release */

	down_write(&container->group_lock);

	/* Real groups and fake groups cannot mix */
	if (!list_empty(&container->group_list) &&
	    container->noiommu != group->noiommu) {
		ret = -EPERM;
		goto unlock_out;
	}

	driver = container->iommu_driver;
	if (driver) {
		ret = driver->ops->attach_group(container->iommu_data,
						group->iommu_group);
		if (ret)
			goto unlock_out;
	}

	group->container = container;
	container->noiommu = group->noiommu;
	list_add(&group->container_next, &container->group_list);

	/* Get a reference on the container and mark a user within the group */
	vfio_container_get(container);
	atomic_inc(&group->container_users);

unlock_out:
	up_write(&container->group_lock);
	fdput(f);
	return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
	return (iommu_group_for_each_dev(group->iommu_group,
					 group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
	struct vfio_device *device;
	struct file *filep;
	int ret;

	if (0 == atomic_read(&group->container_users) ||
	    !group->container->iommu_driver || !vfio_group_viable(group))
		return -EINVAL;

	if (group->noiommu && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	device = vfio_device_get_from_name(group, buf);
	if (!device)
		return -ENODEV;

	ret = device->ops->open(device->device_data);
	if (ret) {
		vfio_device_put(device);
		return ret;
	}

	/*
	 * We can't use anon_inode_getfd() because we need to modify
	 * the f_mode flags directly to allow more than just ioctls
	 */
	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0) {
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
				   device, O_RDWR);
	if (IS_ERR(filep)) {
		put_unused_fd(ret);
		ret = PTR_ERR(filep);
		device->ops->release(device->device_data);
		vfio_device_put(device);
		return ret;
	}

	/*
	 * TODO: add an anon_inode interface to do this.
	 * Appears to be missing by lack of need rather than
	 * explicitly prevented.  Now there's need.
	 */
	filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

	atomic_inc(&group->container_users);

	fd_install(ret, filep);

	if (group->noiommu)
		dev_warn(device->dev, "vfio-noiommu device opened by user "
			 "(%s:%d)\n", current->comm, task_pid_nr(current));

	return ret;
}

static long vfio_group_fops_unl_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vfio_group *group = filep->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case VFIO_GROUP_GET_STATUS:
	{
		struct vfio_group_status status;
		unsigned long minsz;

		minsz = offsetofend(struct vfio_group_status, flags);

		if (copy_from_user(&status, (void __user *)arg, minsz))
			return -EFAULT;

		if (status.argsz < minsz)
			return -EINVAL;

		status.flags = 0;

		if (vfio_group_viable(group))
			status.flags |= VFIO_GROUP_FLAGS_VIABLE;

		if (group->container)
			status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

		if (copy_to_user((void __user *)arg, &status, minsz))
			return -EFAULT;

		ret = 0;
		break;
	}
	case VFIO_GROUP_SET_CONTAINER:
	{
		int fd;

		if (get_user(fd, (int __user *)arg))
			return -EFAULT;

		if (fd < 0)
			return -EINVAL;

		ret = vfio_group_set_container(group, fd);
		break;
	}
	case VFIO_GROUP_UNSET_CONTAINER:
		ret = vfio_group_unset_container(group);
		break;
	case VFIO_GROUP_GET_DEVICE_FD:
	{
		char *buf;

		buf = strndup_user((const char __user *)arg, PAGE_SIZE);
		if (IS_ERR(buf))
			return PTR_ERR(buf);

		ret = vfio_group_get_device_fd(group, buf);
		kfree(buf);
		break;
	}
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
					 unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
	struct vfio_group *group;
	int opened;

	group = vfio_group_get_from_minor(iminor(inode));
	if (!group)
		return -ENODEV;

	if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
		vfio_group_put(group);
		return -EPERM;
	}

	/* Do we need multiple instances of the group open?  Seems not. */
	opened = atomic_cmpxchg(&group->opened, 0, 1);
	if (opened) {
		vfio_group_put(group);
		return -EBUSY;
	}

	/* Is something still in use from a previous open? */
	if (group->container) {
		atomic_dec(&group->opened);
		vfio_group_put(group);
		return -EBUSY;
	}

	filep->private_data = group;

	return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	filep->private_data = NULL;

	vfio_group_try_dissolve_container(group);

	atomic_dec(&group->opened);

	vfio_group_put(group);

	return 0;
}

static const struct file_operations vfio_group_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_group_fops_compat_ioctl,
#endif
	.open		= vfio_group_fops_open,
	.release	= vfio_group_fops_release,
};

/*
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
	struct vfio_device *device = filep->private_data;

	device->ops->release(device->device_data);

	vfio_group_try_dissolve_container(device->group);

	vfio_device_put(device);

	return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
				       unsigned int cmd, unsigned long arg)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->ioctl))
		return -EINVAL;

	return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->read))
		return -EINVAL;

	return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->write))
		return -EINVAL;

	return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct vfio_device *device = filep->private_data;

	if (unlikely(!device->ops->mmap))
		return -EINVAL;

	return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
					  unsigned int cmd, unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif

static const struct file_operations vfio_device_fops = {
	.owner		= THIS_MODULE,
	.release	= vfio_device_fops_release,
	.read		= vfio_device_fops_read,
	.write		= vfio_device_fops_write,
	.unlocked_ioctl	= vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= vfio_device_fops_compat_ioctl,
#endif
	.mmap		= vfio_device_fops_mmap,
};
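
/*
 * Illustrative sketch (not part of this file): the usual userspace flow over
 * the three file descriptors implemented above.  Group "26", the device name
 * and VFIO_TYPE1_IOMMU are only examples:
 *
 *	int container, group, device;
 *	struct vfio_group_status status = { .argsz = sizeof(status) };
 *
 *	container = open("/dev/vfio/vfio", O_RDWR);
 *	if (ioctl(container, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
 *		return -1;
 *
 *	group = open("/dev/vfio/26", O_RDWR);
 *	ioctl(group, VFIO_GROUP_GET_STATUS, &status);
 *	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE))
 *		return -1;
 *
 *	ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *	ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 *	device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, "0000:06:0d.0");
 */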

/*
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol is:
 *  1. An external user calls vfio_group_get_external_user() on a group
 *     file the user has opened; the call fails if the group is not
 *     viable, has no IOMMU driver set, or is a noiommu group.
 *  2. The external user uses vfio_external_user_iommu_id() and
 *     vfio_external_check_extension() to identify the group and query
 *     the container's capabilities.
 *  3. When done, the external user calls vfio_group_put_external_user()
 *     to release the reference (possibly dissolving the container).
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
	struct vfio_group *group = filep->private_data;

	if (filep->f_op != &vfio_group_fops)
		return ERR_PTR(-EINVAL);

	if (!atomic_inc_not_zero(&group->container_users))
		return ERR_PTR(-EINVAL);

	if (group->noiommu) {
		atomic_dec(&group->container_users);
		return ERR_PTR(-EPERM);
	}

	if (!group->container->iommu_driver ||
	    !vfio_group_viable(group)) {
		atomic_dec(&group->container_users);
		return ERR_PTR(-EINVAL);
	}

	vfio_group_get(group);

	return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
	vfio_group_try_dissolve_container(group);
	vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
	return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
	return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
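
/*
 * Illustrative sketch (not part of this file): an in-kernel consumer such as
 * the KVM-VFIO device uses the external user API above roughly as follows,
 * where filep is the group file descriptor handed in by userspace and
 * VFIO_DMA_CC_IOMMU is just one example extension to query:
 *
 *	struct vfio_group *grp;
 *	int id;
 *	long coherent;
 *
 *	grp = vfio_group_get_external_user(filep);
 *	if (IS_ERR(grp))
 *		return PTR_ERR(grp);
 *
 *	id = vfio_external_user_iommu_id(grp);
 *	coherent = vfio_external_check_extension(grp, VFIO_DMA_CC_IOMMU);
 *
 *	... use the group ...
 *
 *	vfio_group_put_external_user(grp);
 */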

/*
 * Sub-module support: helpers for building the capability chains that
 * VFIO drivers return from their *_GET_INFO ioctls.
 */
struct vfio_info_cap_header *vfio_info_cap_add(struct vfio_info_cap *caps,
					       size_t size, u16 id, u16 version)
{
	void *buf;
	struct vfio_info_cap_header *header, *tmp;

	buf = krealloc(caps->buf, caps->size + size, GFP_KERNEL);
	if (!buf) {
		kfree(caps->buf);
		caps->size = 0;
		return ERR_PTR(-ENOMEM);
	}

	caps->buf = buf;
	header = buf + caps->size;

	/* Eventually copied to user buffer, zero */
	memset(header, 0, size);

	header->id = id;
	header->version = version;

	/* Add to the end of the capability chain */
	for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next)
		;

	tmp->next = caps->size;
	caps->size += size;

	return header;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_add);

void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset)
{
	struct vfio_info_cap_header *tmp;

	for (tmp = caps->buf; tmp->next; tmp = (void *)tmp + tmp->next - offset)
		tmp->next += offset;
}
EXPORT_SYMBOL_GPL(vfio_info_cap_shift);
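
/*
 * Illustrative sketch (not part of this file): a VFIO driver builds a
 * capability chain for a *_GET_INFO ioctl roughly as follows.  MY_CAP_ID,
 * struct my_cap and the surrounding info/arg variables are placeholders:
 *
 *	struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
 *	struct vfio_info_cap_header *header;
 *
 *	header = vfio_info_cap_add(&caps, sizeof(struct my_cap), MY_CAP_ID, 1);
 *	if (IS_ERR(header))
 *		return PTR_ERR(header);
 *	(fill in the capability body that follows the header)
 *
 *	(chain offsets are relative to the start of caps.buf; before copying
 *	 the chain out after the fixed-size info struct, rebase them)
 *	vfio_info_cap_shift(&caps, sizeof(info));
 *	if (copy_to_user((void __user *)(arg + sizeof(info)),
 *			 caps.buf, caps.size))
 *		ret = -EFAULT;
 *	kfree(caps.buf);
 */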

/*
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
	.minor = VFIO_MINOR,
	.name = "vfio",
	.fops = &vfio_fops,
	.nodename = "vfio/vfio",
	.mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
	int ret;

	idr_init(&vfio.group_idr);
	mutex_init(&vfio.group_lock);
	mutex_init(&vfio.iommu_drivers_lock);
	INIT_LIST_HEAD(&vfio.group_list);
	INIT_LIST_HEAD(&vfio.iommu_drivers_list);
	init_waitqueue_head(&vfio.release_q);

	ret = misc_register(&vfio_dev);
	if (ret) {
		pr_err("vfio: misc device register failed\n");
		return ret;
	}

	/* /dev/vfio/$GROUP */
	vfio.class = class_create(THIS_MODULE, "vfio");
	if (IS_ERR(vfio.class)) {
		ret = PTR_ERR(vfio.class);
		goto err_class;
	}

	vfio.class->devnode = vfio_devnode;

	ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK, "vfio");
	if (ret)
		goto err_alloc_chrdev;

	cdev_init(&vfio.group_cdev, &vfio_group_fops);
	ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK);
	if (ret)
		goto err_cdev_add;

	pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

	/*
	 * Attempt to load known IOMMU backends.  This gives us a working
	 * environment without the user needing to explicitly load them.
	 */
	request_module_nowait("vfio_iommu_type1");
	request_module_nowait("vfio_iommu_spapr_tce");

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_register_iommu_driver(&vfio_noiommu_ops);
#endif
	return 0;

err_cdev_add:
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
err_alloc_chrdev:
	class_destroy(vfio.class);
	vfio.class = NULL;
err_class:
	misc_deregister(&vfio_dev);
	return ret;
}

static void __exit vfio_cleanup(void)
{
	WARN_ON(!list_empty(&vfio.group_list));

#ifdef CONFIG_VFIO_NOIOMMU
	vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
	idr_destroy(&vfio.group_idr);
	cdev_del(&vfio.group_cdev);
	unregister_chrdev_region(vfio.group_devt, MINORMASK);
	class_destroy(vfio.class);
	vfio.class = NULL;
	misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");