linux/drivers/vfio/vfio.c
/*
 * VFIO core
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/cdev.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/wait.h>

#define DRIVER_VERSION  "0.3"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO - User Level meta-driver"

static struct vfio {
        struct class                    *class;
        struct list_head                iommu_drivers_list;
        struct mutex                    iommu_drivers_lock;
        struct list_head                group_list;
        struct idr                      group_idr;
        struct mutex                    group_lock;
        struct cdev                     group_cdev;
        dev_t                           group_devt;
        wait_queue_head_t               release_q;
} vfio;

struct vfio_iommu_driver {
        const struct vfio_iommu_driver_ops      *ops;
        struct list_head                        vfio_next;
};

struct vfio_container {
        struct kref                     kref;
        struct list_head                group_list;
        struct rw_semaphore             group_lock;
        struct vfio_iommu_driver        *iommu_driver;
        void                            *iommu_data;
};

struct vfio_group {
        struct kref                     kref;
        int                             minor;
        atomic_t                        container_users;
        struct iommu_group              *iommu_group;
        struct vfio_container           *container;
        struct list_head                device_list;
        struct mutex                    device_lock;
        struct device                   *dev;
        struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
        atomic_t                        opened;
};

struct vfio_device {
        struct kref                     kref;
        struct device                   *dev;
        const struct vfio_device_ops    *ops;
        struct vfio_group               *group;
        struct list_head                group_next;
        void                            *device_data;
};
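
/*
 * Illustrative object model (added for orientation, not code): the
 * container (/dev/vfio/vfio fd) holds the IOMMU context; groups
 * (/dev/vfio/$GROUP fds) map 1:1 to iommu_groups and are the unit of
 * ownership granted to userspace; devices hang off their group and
 * are reached via VFIO_GROUP_GET_DEVICE_FD:
 *
 *      vfio_container
 *          +-- vfio_group  <-->  iommu_group
 *                  +-- vfio_device
 */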

/**
 * IOMMU driver registration
 */
int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
        struct vfio_iommu_driver *driver, *tmp;

        driver = kzalloc(sizeof(*driver), GFP_KERNEL);
        if (!driver)
                return -ENOMEM;

        driver->ops = ops;

        mutex_lock(&vfio.iommu_drivers_lock);

        /* Check for duplicates */
        list_for_each_entry(tmp, &vfio.iommu_drivers_list, vfio_next) {
                if (tmp->ops == ops) {
                        mutex_unlock(&vfio.iommu_drivers_lock);
                        kfree(driver);
                        return -EINVAL;
                }
        }

        list_add(&driver->vfio_next, &vfio.iommu_drivers_list);

        mutex_unlock(&vfio.iommu_drivers_lock);

        return 0;
}
EXPORT_SYMBOL_GPL(vfio_register_iommu_driver);

void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
{
        struct vfio_iommu_driver *driver;

        mutex_lock(&vfio.iommu_drivers_lock);
        list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
                if (driver->ops == ops) {
                        list_del(&driver->vfio_next);
                        mutex_unlock(&vfio.iommu_drivers_lock);
                        kfree(driver);
                        return;
                }
        }
        mutex_unlock(&vfio.iommu_drivers_lock);
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
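
/*
 * For orientation, a minimal sketch of how an IOMMU backend module
 * (e.g. vfio_iommu_type1) is expected to plug in.  The my_iommu_*
 * names are hypothetical and the ops list is abbreviated; see
 * struct vfio_iommu_driver_ops in <linux/vfio.h> for the full set:
 *
 *      static const struct vfio_iommu_driver_ops my_iommu_ops = {
 *              .name           = "my_iommu",
 *              .owner          = THIS_MODULE,
 *              .open           = my_iommu_open,
 *              .release        = my_iommu_release,
 *              .ioctl          = my_iommu_ioctl,
 *              .attach_group   = my_iommu_attach_group,
 *              .detach_group   = my_iommu_detach_group,
 *      };
 *
 *      static int __init my_iommu_init(void)
 *      {
 *              return vfio_register_iommu_driver(&my_iommu_ops);
 *      }
 *
 *      static void __exit my_iommu_exit(void)
 *      {
 *              vfio_unregister_iommu_driver(&my_iommu_ops);
 *      }
 */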

/**
 * Group minor allocation/free - both called with vfio.group_lock held
 */
static int vfio_alloc_group_minor(struct vfio_group *group)
{
        return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
}

static void vfio_free_group_minor(int minor)
{
        idr_remove(&vfio.group_idr, minor);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);

/**
 * Container objects - containers are created when /dev/vfio/vfio is
 * opened, but their lifecycle extends until the last user is done, so
 * it's freed via kref.  Must support container/group/device being
 * closed in any order.
 */
static void vfio_container_get(struct vfio_container *container)
{
        kref_get(&container->kref);
}

static void vfio_container_release(struct kref *kref)
{
        struct vfio_container *container;
        container = container_of(kref, struct vfio_container, kref);

        kfree(container);
}

static void vfio_container_put(struct vfio_container *container)
{
        kref_put(&container->kref, vfio_container_release);
}

static void vfio_group_unlock_and_free(struct vfio_group *group)
{
        mutex_unlock(&vfio.group_lock);
        /*
         * Unregister outside of lock.  A spurious callback is harmless now
         * that the group is no longer in vfio.group_list.
         */
        iommu_group_unregister_notifier(group->iommu_group, &group->nb);
        kfree(group);
}

/**
 * Group objects - create, release, get, put, search
 */
static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
{
        struct vfio_group *group, *tmp;
        struct device *dev;
        int ret, minor;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        kref_init(&group->kref);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;

        group->nb.notifier_call = vfio_iommu_group_notifier;

        /*
         * blocking notifiers acquire a rwsem around registering and hold
         * it around callback.  Therefore, need to register outside of
         * vfio.group_lock to avoid A-B/B-A contention.  Our callback won't
         * do anything unless it can find the group in vfio.group_list, so
         * no harm in registering early.
         */
        ret = iommu_group_register_notifier(iommu_group, &group->nb);
        if (ret) {
                kfree(group);
                return ERR_PTR(ret);
        }

        mutex_lock(&vfio.group_lock);

        minor = vfio_alloc_group_minor(group);
        if (minor < 0) {
                vfio_group_unlock_and_free(group);
                return ERR_PTR(minor);
        }

        /* Did we race creating this group? */
        list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
                if (tmp->iommu_group == iommu_group) {
                        vfio_group_get(tmp);
                        vfio_free_group_minor(minor);
                        vfio_group_unlock_and_free(group);
                        return tmp;
                }
        }

        dev = device_create(vfio.class, NULL,
                            MKDEV(MAJOR(vfio.group_devt), minor),
                            group, "%d", iommu_group_id(iommu_group));
        if (IS_ERR(dev)) {
                vfio_free_group_minor(minor);
                vfio_group_unlock_and_free(group);
                return ERR_CAST(dev);
        }

        group->minor = minor;
        group->dev = dev;

        list_add(&group->vfio_next, &vfio.group_list);

        mutex_unlock(&vfio.group_lock);

        return group;
}

/* called with vfio.group_lock held */
static void vfio_group_release(struct kref *kref)
{
        struct vfio_group *group = container_of(kref, struct vfio_group, kref);

        WARN_ON(!list_empty(&group->device_list));

        device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
        list_del(&group->vfio_next);
        vfio_free_group_minor(group->minor);
        vfio_group_unlock_and_free(group);
}

static void vfio_group_put(struct vfio_group *group)
{
        kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}

/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
        kref_get(&group->kref);
}

/*
 * Not really a try as we will sleep for mutex, but we need to make
 * sure the group pointer is valid under lock and get a reference.
 */
static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
{
        struct vfio_group *target = group;

        mutex_lock(&vfio.group_lock);
        list_for_each_entry(group, &vfio.group_list, vfio_next) {
                if (group == target) {
                        vfio_group_get(group);
                        mutex_unlock(&vfio.group_lock);
                        return group;
                }
        }
        mutex_unlock(&vfio.group_lock);

        return NULL;
}

static
struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
        struct vfio_group *group;

        mutex_lock(&vfio.group_lock);
        list_for_each_entry(group, &vfio.group_list, vfio_next) {
                if (group->iommu_group == iommu_group) {
                        vfio_group_get(group);
                        mutex_unlock(&vfio.group_lock);
                        return group;
                }
        }
        mutex_unlock(&vfio.group_lock);

        return NULL;
}

static struct vfio_group *vfio_group_get_from_minor(int minor)
{
        struct vfio_group *group;

        mutex_lock(&vfio.group_lock);
        group = idr_find(&vfio.group_idr, minor);
        if (!group) {
                mutex_unlock(&vfio.group_lock);
                return NULL;
        }
        vfio_group_get(group);
        mutex_unlock(&vfio.group_lock);

        return group;
}

/**
 * Device objects - create, release, get, put, search
 */
static
struct vfio_device *vfio_group_create_device(struct vfio_group *group,
                                             struct device *dev,
                                             const struct vfio_device_ops *ops,
                                             void *device_data)
{
        struct vfio_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return ERR_PTR(-ENOMEM);

        kref_init(&device->kref);
        device->dev = dev;
        device->group = group;
        device->ops = ops;
        device->device_data = device_data;
        dev_set_drvdata(dev, device);

        /* No need to get group_lock, caller has group reference */
        vfio_group_get(group);

        mutex_lock(&group->device_lock);
        list_add(&device->group_next, &group->device_list);
        mutex_unlock(&group->device_lock);

        return device;
}

static void vfio_device_release(struct kref *kref)
{
        struct vfio_device *device = container_of(kref,
                                                  struct vfio_device, kref);
        struct vfio_group *group = device->group;

        list_del(&device->group_next);
        mutex_unlock(&group->device_lock);

        dev_set_drvdata(device->dev, NULL);

        kfree(device);

        /* vfio_del_group_dev may be waiting for this device */
        wake_up(&vfio.release_q);
}

/* Device reference always implies a group reference */
void vfio_device_put(struct vfio_device *device)
{
        struct vfio_group *group = device->group;
        kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock);
        vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_device_put);

static void vfio_device_get(struct vfio_device *device)
{
        vfio_group_get(device->group);
        kref_get(&device->kref);
}

static struct vfio_device *vfio_group_get_device(struct vfio_group *group,
                                                 struct device *dev)
{
        struct vfio_device *device;

        mutex_lock(&group->device_lock);
        list_for_each_entry(device, &group->device_list, group_next) {
                if (device->dev == dev) {
                        vfio_device_get(device);
                        mutex_unlock(&group->device_lock);
                        return device;
                }
        }
        mutex_unlock(&group->device_lock);
        return NULL;
}

/*
 * Whitelist some drivers that we know are safe (no dma) or just sit on
 * a device.  It's not always practical to leave a device within a group
 * driverless as it could get re-bound to something unsafe.
 */
static const char * const vfio_driver_whitelist[] = { "pci-stub", "pcieport" };

static bool vfio_whitelisted_driver(struct device_driver *drv)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vfio_driver_whitelist); i++) {
                if (!strcmp(drv->name, vfio_driver_whitelist[i]))
                        return true;
        }

        return false;
}

/*
 * A vfio group is viable for use by userspace if all devices are either
 * driver-less or bound to a vfio or whitelisted driver.  We test the
 * latter by the existence of a struct vfio_device matching the dev.
 */
static int vfio_dev_viable(struct device *dev, void *data)
{
        struct vfio_group *group = data;
        struct vfio_device *device;
        struct device_driver *drv = ACCESS_ONCE(dev->driver);

        if (!drv || vfio_whitelisted_driver(drv))
                return 0;

        device = vfio_group_get_device(group, dev);
        if (device) {
                vfio_device_put(device);
                return 0;
        }

        return -EINVAL;
}
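
/*
 * A hypothetical example: a group holding two functions of one card,
 * one bound to vfio-pci and one to pci-stub, is viable; the same
 * group with either function bound to its native host driver is not,
 * and userspace cannot use the group until that driver is unbound.
 */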

/**
 * Async device support
 */
static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
{
        struct vfio_device *device;

        /* Do we already know about it?  We shouldn't */
        device = vfio_group_get_device(group, dev);
        if (WARN_ON_ONCE(device)) {
                vfio_device_put(device);
                return 0;
        }

        /* Nothing to do for idle groups */
        if (!atomic_read(&group->container_users))
                return 0;

        /* TODO Prevent device auto probing */
        WARN("Device %s added to live group %d!\n", dev_name(dev),
             iommu_group_id(group->iommu_group));

        return 0;
}

static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
        /* We don't care what happens when the group isn't in use */
        if (!atomic_read(&group->container_users))
                return 0;

        return vfio_dev_viable(dev, group);
}

static int vfio_iommu_group_notifier(struct notifier_block *nb,
                                     unsigned long action, void *data)
{
        struct vfio_group *group = container_of(nb, struct vfio_group, nb);
        struct device *dev = data;

        /*
         * Need to go through a group_lock lookup to get a reference or we
         * risk racing a group being removed.  Ignore spurious notifies.
         */
        group = vfio_group_try_get(group);
        if (!group)
                return NOTIFY_OK;

        switch (action) {
        case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
                vfio_group_nb_add_dev(group, dev);
                break;
        case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
                /*
                 * Nothing to do here.  If the device is in use, then the
                 * vfio sub-driver should block the remove callback until
                 * it is unused.  If the device is unused or attached to a
                 * stub driver, then it should be released and we don't
                 * care that it will be going away.
                 */
                break;
        case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
                pr_debug("%s: Device %s, group %d binding to driver\n",
                         __func__, dev_name(dev),
                         iommu_group_id(group->iommu_group));
                break;
        case IOMMU_GROUP_NOTIFY_BOUND_DRIVER:
                pr_debug("%s: Device %s, group %d bound to driver %s\n",
                         __func__, dev_name(dev),
                         iommu_group_id(group->iommu_group), dev->driver->name);
                BUG_ON(vfio_group_nb_verify(group, dev));
                break;
        case IOMMU_GROUP_NOTIFY_UNBIND_DRIVER:
                pr_debug("%s: Device %s, group %d unbinding from driver %s\n",
                         __func__, dev_name(dev),
                         iommu_group_id(group->iommu_group), dev->driver->name);
                break;
        case IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER:
                pr_debug("%s: Device %s, group %d unbound from driver\n",
                         __func__, dev_name(dev),
                         iommu_group_id(group->iommu_group));
                /*
                 * XXX An unbound device in a live group is ok, but we'd
                 * really like to avoid the above BUG_ON by preventing other
                 * drivers from binding to it.  Once that occurs, we have to
                 * stop the system to maintain isolation.  At a minimum, we'd
                 * want a toggle to disable driver auto probe for this device.
                 */
                break;
        }

        vfio_group_put(group);
        return NOTIFY_OK;
}

/**
 * VFIO driver API
 */
int vfio_add_group_dev(struct device *dev,
                       const struct vfio_device_ops *ops, void *device_data)
{
        struct iommu_group *iommu_group;
        struct vfio_group *group;
        struct vfio_device *device;

        iommu_group = iommu_group_get(dev);
        if (!iommu_group)
                return -EINVAL;

        group = vfio_group_get_from_iommu(iommu_group);
        if (!group) {
                group = vfio_create_group(iommu_group);
                if (IS_ERR(group)) {
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
                }
        }

        device = vfio_group_get_device(group, dev);
        if (device) {
                WARN(1, "Device %s already exists on group %d\n",
                     dev_name(dev), iommu_group_id(iommu_group));
                vfio_device_put(device);
                vfio_group_put(group);
                iommu_group_put(iommu_group);
                return -EBUSY;
        }

        device = vfio_group_create_device(group, dev, ops, device_data);
        if (IS_ERR(device)) {
                vfio_group_put(group);
                iommu_group_put(iommu_group);
                return PTR_ERR(device);
        }

        /*
         * Added device holds reference to iommu_group and vfio_device
         * (which in turn holds reference to vfio_group).  Drop extra
         * group reference used while acquiring device.
         */
        vfio_group_put(group);

        return 0;
}
EXPORT_SYMBOL_GPL(vfio_add_group_dev);
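
/*
 * A minimal sketch of the expected caller, loosely modeled on a vfio
 * bus driver such as vfio-pci.  The my_* names are hypothetical and
 * the ops list abbreviated; see struct vfio_device_ops in
 * <linux/vfio.h>:
 *
 *      static const struct vfio_device_ops my_vfio_ops = {
 *              .name           = "my-vfio",
 *              .open           = my_open,
 *              .release        = my_release,
 *              .ioctl          = my_ioctl,
 *              .read           = my_read,
 *              .write          = my_write,
 *              .mmap           = my_mmap,
 *      };
 *
 *      static int my_probe(struct pci_dev *pdev,
 *                          const struct pci_device_id *id)
 *      {
 *              struct my_device *mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
 *
 *              if (!mdev)
 *                      return -ENOMEM;
 *              return vfio_add_group_dev(&pdev->dev, &my_vfio_ops, mdev);
 *      }
 */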

/**
 * Get a reference to the vfio_device for a device that is known to
 * be bound to a vfio driver.  The driver implicitly holds a
 * vfio_device reference between vfio_add_group_dev and
 * vfio_del_group_dev.  We can therefore use drvdata to increment
 * that reference from the struct device.  This additional
 * reference must be released by calling vfio_device_put.
 */
struct vfio_device *vfio_device_get_from_dev(struct device *dev)
{
        struct vfio_device *device = dev_get_drvdata(dev);

        vfio_device_get(device);

        return device;
}
EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);

/*
 * Caller must hold a reference to the vfio_device
 */
void *vfio_device_data(struct vfio_device *device)
{
        return device->device_data;
}
EXPORT_SYMBOL_GPL(vfio_device_data);

/* Given a referenced group, check if it contains the device */
static bool vfio_dev_present(struct vfio_group *group, struct device *dev)
{
        struct vfio_device *device;

        device = vfio_group_get_device(group, dev);
        if (!device)
                return false;

        vfio_device_put(device);
        return true;
}

/*
 * Decrement the device reference count and wait for the device to be
 * removed.  Open file descriptors for the device hold references that
 * keep it present, so removal blocks until userspace closes them all.
 */
void *vfio_del_group_dev(struct device *dev)
{
        struct vfio_device *device = dev_get_drvdata(dev);
        struct vfio_group *group = device->group;
        struct iommu_group *iommu_group = group->iommu_group;
        void *device_data = device->device_data;

        /*
         * The group exists so long as we have a device reference.  Get
         * a group reference and use it to scan for the device going away.
         */
        vfio_group_get(group);

        vfio_device_put(device);

        /* TODO send a signal to encourage this to be released */
        wait_event(vfio.release_q, !vfio_dev_present(group, dev));

        vfio_group_put(group);

        iommu_group_put(iommu_group);

        return device_data;
}
EXPORT_SYMBOL_GPL(vfio_del_group_dev);
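
/*
 * The matching remove path for the hypothetical driver sketched above;
 * because vfio_del_group_dev() blocks until all open device fds are
 * released, the returned driver data is safe to free:
 *
 *      static void my_remove(struct pci_dev *pdev)
 *      {
 *              struct my_device *mdev = vfio_del_group_dev(&pdev->dev);
 *
 *              kfree(mdev);
 *      }
 */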

/**
 * VFIO base fd, /dev/vfio/vfio
 */
static long vfio_ioctl_check_extension(struct vfio_container *container,
                                       unsigned long arg)
{
        struct vfio_iommu_driver *driver;
        long ret = 0;

        down_read(&container->group_lock);

        driver = container->iommu_driver;

        switch (arg) {
                /* No base extensions yet */
        default:
                /*
                 * If no driver is set, poll all registered drivers for
                 * extensions and return the first positive result.  If
                 * a driver is already set, further queries will be passed
                 * only to that driver.
                 */
                if (!driver) {
                        mutex_lock(&vfio.iommu_drivers_lock);
                        list_for_each_entry(driver, &vfio.iommu_drivers_list,
                                            vfio_next) {
                                if (!try_module_get(driver->ops->owner))
                                        continue;

                                ret = driver->ops->ioctl(NULL,
                                                         VFIO_CHECK_EXTENSION,
                                                         arg);
                                module_put(driver->ops->owner);
                                if (ret > 0)
                                        break;
                        }
                        mutex_unlock(&vfio.iommu_drivers_lock);
                } else
                        ret = driver->ops->ioctl(container->iommu_data,
                                                 VFIO_CHECK_EXTENSION, arg);
        }

        up_read(&container->group_lock);

        return ret;
}
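
/*
 * From userspace the container is typically probed like this
 * (illustrative fragment, error handling omitted):
 *
 *      int container = open("/dev/vfio/vfio", O_RDWR);
 *      int version = ioctl(container, VFIO_GET_API_VERSION);
 *      int has_type1 = ioctl(container, VFIO_CHECK_EXTENSION,
 *                            VFIO_TYPE1_IOMMU);
 *
 * Issued before VFIO_SET_IOMMU, the extension check polls every
 * registered backend; afterwards it is routed only to the bound driver.
 */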

/* hold write lock on container->group_lock */
static int __vfio_container_attach_groups(struct vfio_container *container,
                                          struct vfio_iommu_driver *driver,
                                          void *data)
{
        struct vfio_group *group;
        int ret = -ENODEV;

        list_for_each_entry(group, &container->group_list, container_next) {
                ret = driver->ops->attach_group(data, group->iommu_group);
                if (ret)
                        goto unwind;
        }

        return ret;

unwind:
        list_for_each_entry_continue_reverse(group, &container->group_list,
                                             container_next) {
                driver->ops->detach_group(data, group->iommu_group);
        }

        return ret;
}

static long vfio_ioctl_set_iommu(struct vfio_container *container,
                                 unsigned long arg)
{
        struct vfio_iommu_driver *driver;
        long ret = -ENODEV;

        down_write(&container->group_lock);

        /*
         * The container is designed to be an unprivileged interface while
         * the group can be assigned to specific users.  Therefore, only by
         * adding a group to a container does the user get the privilege of
         * enabling the iommu, which may allocate finite resources.  There
         * is no unset_iommu, but by removing all the groups from a container,
         * the container is deprivileged and returns to an unset state.
         */
        if (list_empty(&container->group_list) || container->iommu_driver) {
                up_write(&container->group_lock);
                return -EINVAL;
        }

        mutex_lock(&vfio.iommu_drivers_lock);
        list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
                void *data;

                if (!try_module_get(driver->ops->owner))
                        continue;

                /*
                 * The arg magic for SET_IOMMU is the same as CHECK_EXTENSION,
                 * so test which iommu driver reported support for this
                 * extension and call open on them.  We also pass them the
                 * magic, allowing a single driver to support multiple
                 * interfaces if they'd like.
                 */
                if (driver->ops->ioctl(NULL, VFIO_CHECK_EXTENSION, arg) <= 0) {
                        module_put(driver->ops->owner);
                        continue;
                }

                /* module reference holds the driver we're working on */
                mutex_unlock(&vfio.iommu_drivers_lock);

                data = driver->ops->open(arg);
                if (IS_ERR(data)) {
                        ret = PTR_ERR(data);
                        module_put(driver->ops->owner);
                        goto skip_drivers_unlock;
                }

                ret = __vfio_container_attach_groups(container, driver, data);
                if (!ret) {
                        container->iommu_driver = driver;
                        container->iommu_data = data;
                } else {
                        driver->ops->release(data);
                        module_put(driver->ops->owner);
                }

                goto skip_drivers_unlock;
        }

        mutex_unlock(&vfio.iommu_drivers_lock);
skip_drivers_unlock:
        up_write(&container->group_lock);

        return ret;
}
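
/*
 * Continuing the illustrative userspace fragment: a group must be
 * attached before VFIO_SET_IOMMU can succeed, because adding a group
 * is what privileges the container ("26" is an arbitrary example
 * group number):
 *
 *      int group = open("/dev/vfio/26", O_RDWR);
 *
 *      ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
 *      ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 */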

static long vfio_fops_unl_ioctl(struct file *filep,
                                unsigned int cmd, unsigned long arg)
{
        struct vfio_container *container = filep->private_data;
        struct vfio_iommu_driver *driver;
        void *data;
        long ret = -EINVAL;

        if (!container)
                return ret;

        switch (cmd) {
        case VFIO_GET_API_VERSION:
                ret = VFIO_API_VERSION;
                break;
        case VFIO_CHECK_EXTENSION:
                ret = vfio_ioctl_check_extension(container, arg);
                break;
        case VFIO_SET_IOMMU:
                ret = vfio_ioctl_set_iommu(container, arg);
                break;
        default:
                down_read(&container->group_lock);

                driver = container->iommu_driver;
                data = container->iommu_data;

                if (driver) /* passthrough all unrecognized ioctls */
                        ret = driver->ops->ioctl(data, cmd, arg);

                up_read(&container->group_lock);
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_fops_compat_ioctl(struct file *filep,
                                   unsigned int cmd, unsigned long arg)
{
        arg = (unsigned long)compat_ptr(arg);
        return vfio_fops_unl_ioctl(filep, cmd, arg);
}
#endif  /* CONFIG_COMPAT */

static int vfio_fops_open(struct inode *inode, struct file *filep)
{
        struct vfio_container *container;

        container = kzalloc(sizeof(*container), GFP_KERNEL);
        if (!container)
                return -ENOMEM;

        INIT_LIST_HEAD(&container->group_list);
        init_rwsem(&container->group_lock);
        kref_init(&container->kref);

        filep->private_data = container;

        return 0;
}

static int vfio_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_container *container = filep->private_data;

        filep->private_data = NULL;

        vfio_container_put(container);

        return 0;
}

/*
 * Once an iommu driver is set, we optionally pass read/write/mmap
 * on to the driver, allowing management interfaces beyond ioctl.
 */
static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
                              size_t count, loff_t *ppos)
{
        struct vfio_container *container = filep->private_data;
        struct vfio_iommu_driver *driver;
        ssize_t ret = -EINVAL;

        down_read(&container->group_lock);

        driver = container->iommu_driver;
        if (likely(driver && driver->ops->read))
                ret = driver->ops->read(container->iommu_data,
                                        buf, count, ppos);

        up_read(&container->group_lock);

        return ret;
}

static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
                               size_t count, loff_t *ppos)
{
        struct vfio_container *container = filep->private_data;
        struct vfio_iommu_driver *driver;
        ssize_t ret = -EINVAL;

        down_read(&container->group_lock);

        driver = container->iommu_driver;
        if (likely(driver && driver->ops->write))
                ret = driver->ops->write(container->iommu_data,
                                         buf, count, ppos);

        up_read(&container->group_lock);

        return ret;
}

static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct vfio_container *container = filep->private_data;
        struct vfio_iommu_driver *driver;
        int ret = -EINVAL;

        down_read(&container->group_lock);

        driver = container->iommu_driver;
        if (likely(driver && driver->ops->mmap))
                ret = driver->ops->mmap(container->iommu_data, vma);

        up_read(&container->group_lock);

        return ret;
}

static const struct file_operations vfio_fops = {
        .owner          = THIS_MODULE,
        .open           = vfio_fops_open,
        .release        = vfio_fops_release,
        .read           = vfio_fops_read,
        .write          = vfio_fops_write,
        .unlocked_ioctl = vfio_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vfio_fops_compat_ioctl,
#endif
        .mmap           = vfio_fops_mmap,
};

/**
 * VFIO Group fd, /dev/vfio/$GROUP
 */
static void __vfio_group_unset_container(struct vfio_group *group)
{
        struct vfio_container *container = group->container;
        struct vfio_iommu_driver *driver;

        down_write(&container->group_lock);

        driver = container->iommu_driver;
        if (driver)
                driver->ops->detach_group(container->iommu_data,
                                          group->iommu_group);

        group->container = NULL;
        list_del(&group->container_next);

        /* Detaching the last group deprivileges a container, remove iommu */
        if (driver && list_empty(&container->group_list)) {
                driver->ops->release(container->iommu_data);
                module_put(driver->ops->owner);
                container->iommu_driver = NULL;
                container->iommu_data = NULL;
        }

        up_write(&container->group_lock);

        vfio_container_put(container);
}

/*
 * VFIO_GROUP_UNSET_CONTAINER should fail if there are other users or
 * if there was no container to unset.  Since the ioctl is called on
 * the group, we know that the group still exists, therefore the only
 * valid transition here is 1->0.
 */
static int vfio_group_unset_container(struct vfio_group *group)
{
        int users = atomic_cmpxchg(&group->container_users, 1, 0);

        if (!users)
                return -EINVAL;
        if (users != 1)
                return -EBUSY;

        __vfio_group_unset_container(group);

        return 0;
}

/*
 * When removing container users, anything that removes the last user
 * implicitly removes the group from the container.  That is, if the
 * group file descriptor is closed, as well as any device file descriptors,
 * the group is free.
 */
static void vfio_group_try_dissolve_container(struct vfio_group *group)
{
        if (0 == atomic_dec_if_positive(&group->container_users))
                __vfio_group_unset_container(group);
}

static int vfio_group_set_container(struct vfio_group *group, int container_fd)
{
        struct fd f;
        struct vfio_container *container;
        struct vfio_iommu_driver *driver;
        int ret = 0;

        if (atomic_read(&group->container_users))
                return -EINVAL;

        f = fdget(container_fd);
        if (!f.file)
                return -EBADF;

        /* Sanity check, is this really our fd? */
        if (f.file->f_op != &vfio_fops) {
                fdput(f);
                return -EINVAL;
        }

        container = f.file->private_data;
        WARN_ON(!container); /* fget ensures we don't race vfio_release */

        down_write(&container->group_lock);

        driver = container->iommu_driver;
        if (driver) {
                ret = driver->ops->attach_group(container->iommu_data,
                                                group->iommu_group);
                if (ret)
                        goto unlock_out;
        }

        group->container = container;
        list_add(&group->container_next, &container->group_list);

        /* Get a reference on the container and mark a user within the group */
        vfio_container_get(container);
        atomic_inc(&group->container_users);

unlock_out:
        up_write(&container->group_lock);
        fdput(f);
        return ret;
}

static bool vfio_group_viable(struct vfio_group *group)
{
        return (iommu_group_for_each_dev(group->iommu_group,
                                         group, vfio_dev_viable) == 0);
}

static const struct file_operations vfio_device_fops;

static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
{
        struct vfio_device *device;
        struct file *filep;
        int ret = -ENODEV;

        if (0 == atomic_read(&group->container_users) ||
            !group->container->iommu_driver || !vfio_group_viable(group))
                return -EINVAL;

        mutex_lock(&group->device_lock);
        list_for_each_entry(device, &group->device_list, group_next) {
                if (strcmp(dev_name(device->dev), buf))
                        continue;

                ret = device->ops->open(device->device_data);
                if (ret)
                        break;
                /*
                 * We can't use anon_inode_getfd() because we need to modify
                 * the f_mode flags directly to allow more than just ioctls
                 */
                ret = get_unused_fd_flags(O_CLOEXEC);
                if (ret < 0) {
                        device->ops->release(device->device_data);
                        break;
                }

                filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
                                           device, O_RDWR);
                if (IS_ERR(filep)) {
                        put_unused_fd(ret);
                        ret = PTR_ERR(filep);
                        device->ops->release(device->device_data);
                        break;
                }

                /*
                 * TODO: add an anon_inode interface to do this.
                 * Appears to be missing by lack of need rather than
                 * explicitly prevented.  Now there's need.
                 */
                filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);

                vfio_device_get(device);
                atomic_inc(&group->container_users);

                fd_install(ret, filep);
                break;
        }
        mutex_unlock(&group->device_lock);

        return ret;
}
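
/*
 * Illustrative userspace counterpart (the device address below is an
 * arbitrary example):
 *
 *      int device = ioctl(group, VFIO_GROUP_GET_DEVICE_FD,
 *                         "0000:06:0d.0");
 *
 * Each device fd counts as a container user, so closing the group fd
 * alone does not dissolve the container while device fds remain open.
 */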

static long vfio_group_fops_unl_ioctl(struct file *filep,
                                      unsigned int cmd, unsigned long arg)
{
        struct vfio_group *group = filep->private_data;
        long ret = -ENOTTY;

        switch (cmd) {
        case VFIO_GROUP_GET_STATUS:
        {
                struct vfio_group_status status;
                unsigned long minsz;

                minsz = offsetofend(struct vfio_group_status, flags);

                if (copy_from_user(&status, (void __user *)arg, minsz))
                        return -EFAULT;

                if (status.argsz < minsz)
                        return -EINVAL;

                status.flags = 0;

                if (vfio_group_viable(group))
                        status.flags |= VFIO_GROUP_FLAGS_VIABLE;

                if (group->container)
                        status.flags |= VFIO_GROUP_FLAGS_CONTAINER_SET;

                if (copy_to_user((void __user *)arg, &status, minsz))
                        return -EFAULT;

                ret = 0;
                break;
        }
        case VFIO_GROUP_SET_CONTAINER:
        {
                int fd;

                if (get_user(fd, (int __user *)arg))
                        return -EFAULT;

                if (fd < 0)
                        return -EINVAL;

                ret = vfio_group_set_container(group, fd);
                break;
        }
        case VFIO_GROUP_UNSET_CONTAINER:
                ret = vfio_group_unset_container(group);
                break;
        case VFIO_GROUP_GET_DEVICE_FD:
        {
                char *buf;

                buf = strndup_user((const char __user *)arg, PAGE_SIZE);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);

                ret = vfio_group_get_device_fd(group, buf);
                kfree(buf);
                break;
        }
        }

        return ret;
}

#ifdef CONFIG_COMPAT
static long vfio_group_fops_compat_ioctl(struct file *filep,
                                         unsigned int cmd, unsigned long arg)
{
        arg = (unsigned long)compat_ptr(arg);
        return vfio_group_fops_unl_ioctl(filep, cmd, arg);
}
#endif  /* CONFIG_COMPAT */

static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
        struct vfio_group *group;
        int opened;

        group = vfio_group_get_from_minor(iminor(inode));
        if (!group)
                return -ENODEV;

        /* Do we need multiple instances of the group open?  Seems not. */
        opened = atomic_cmpxchg(&group->opened, 0, 1);
        if (opened) {
                vfio_group_put(group);
                return -EBUSY;
        }

        /* Is something still in use from a previous open? */
        if (group->container) {
                atomic_dec(&group->opened);
                vfio_group_put(group);
                return -EBUSY;
        }

        filep->private_data = group;

        return 0;
}

static int vfio_group_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_group *group = filep->private_data;

        filep->private_data = NULL;

        vfio_group_try_dissolve_container(group);

        atomic_dec(&group->opened);

        vfio_group_put(group);

        return 0;
}

static const struct file_operations vfio_group_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = vfio_group_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vfio_group_fops_compat_ioctl,
#endif
        .open           = vfio_group_fops_open,
        .release        = vfio_group_fops_release,
};

/**
 * VFIO Device fd
 */
static int vfio_device_fops_release(struct inode *inode, struct file *filep)
{
        struct vfio_device *device = filep->private_data;

        device->ops->release(device->device_data);

        vfio_group_try_dissolve_container(device->group);

        vfio_device_put(device);

        return 0;
}

static long vfio_device_fops_unl_ioctl(struct file *filep,
                                       unsigned int cmd, unsigned long arg)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->ioctl))
                return -EINVAL;

        return device->ops->ioctl(device->device_data, cmd, arg);
}

static ssize_t vfio_device_fops_read(struct file *filep, char __user *buf,
                                     size_t count, loff_t *ppos)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->read))
                return -EINVAL;

        return device->ops->read(device->device_data, buf, count, ppos);
}

static ssize_t vfio_device_fops_write(struct file *filep,
                                      const char __user *buf,
                                      size_t count, loff_t *ppos)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->write))
                return -EINVAL;

        return device->ops->write(device->device_data, buf, count, ppos);
}

static int vfio_device_fops_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct vfio_device *device = filep->private_data;

        if (unlikely(!device->ops->mmap))
                return -EINVAL;

        return device->ops->mmap(device->device_data, vma);
}

#ifdef CONFIG_COMPAT
static long vfio_device_fops_compat_ioctl(struct file *filep,
                                          unsigned int cmd, unsigned long arg)
{
        arg = (unsigned long)compat_ptr(arg);
        return vfio_device_fops_unl_ioctl(filep, cmd, arg);
}
#endif  /* CONFIG_COMPAT */

static const struct file_operations vfio_device_fops = {
        .owner          = THIS_MODULE,
        .release        = vfio_device_fops_release,
        .read           = vfio_device_fops_read,
        .write          = vfio_device_fops_write,
        .unlocked_ioctl = vfio_device_fops_unl_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vfio_device_fops_compat_ioctl,
#endif
        .mmap           = vfio_device_fops_mmap,
};

/**
 * External user API, exported by symbols to be linked dynamically.
 *
 * The protocol includes:
 *  1. do normal VFIO init operation:
 *      - opening a new container;
 *      - attaching group(s) to it;
 *      - setting an IOMMU driver for a container.
 * When IOMMU is set for a container, all groups in it are
 * considered ready to use by an external user.
 *
 * 2. User space passes a group fd to an external user.
 * The external user calls vfio_group_get_external_user()
 * to verify that:
 *      - the group is initialized;
 *      - IOMMU is set for it.
 * If both checks passed, vfio_group_get_external_user()
 * increments the container user counter to prevent
 * the VFIO group from disposal before KVM exits.
 *
 * 3. The external user calls vfio_external_user_iommu_id()
 * to know an IOMMU ID.
 *
 * 4. When the external KVM finishes, it calls
 * vfio_group_put_external_user() to release the VFIO group.
 * This call decrements the container user counter.
 *
 * (An illustrative consumer sketch follows these helpers below.)
 */
struct vfio_group *vfio_group_get_external_user(struct file *filep)
{
        struct vfio_group *group = filep->private_data;

        if (filep->f_op != &vfio_group_fops)
                return ERR_PTR(-EINVAL);

        if (!atomic_inc_not_zero(&group->container_users))
                return ERR_PTR(-EINVAL);

        if (!group->container->iommu_driver ||
                        !vfio_group_viable(group)) {
                atomic_dec(&group->container_users);
                return ERR_PTR(-EINVAL);
        }

        vfio_group_get(group);

        return group;
}
EXPORT_SYMBOL_GPL(vfio_group_get_external_user);

void vfio_group_put_external_user(struct vfio_group *group)
{
        /*
         * Dissolve the container user before dropping the group
         * reference; the reverse order could leave us touching a
         * freed group.
         */
        vfio_group_try_dissolve_container(group);
        vfio_group_put(group);
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);

int vfio_external_user_iommu_id(struct vfio_group *group)
{
        return iommu_group_id(group->iommu_group);
}
EXPORT_SYMBOL_GPL(vfio_external_user_iommu_id);

long vfio_external_check_extension(struct vfio_group *group, unsigned long arg)
{
        return vfio_ioctl_check_extension(group->container, arg);
}
EXPORT_SYMBOL_GPL(vfio_external_check_extension);
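
/*
 * An illustrative in-kernel consumer of the protocol above, with
 * hypothetical my_* names (KVM is the expected real consumer):
 *
 *      struct vfio_group *grp = vfio_group_get_external_user(filep);
 *
 *      if (IS_ERR(grp))
 *              return PTR_ERR(grp);
 *      my_handle_group(vfio_external_user_iommu_id(grp));
 *      ...
 *      vfio_group_put_external_user(grp);
 */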

/**
 * Module/class support
 */
static char *vfio_devnode(struct device *dev, umode_t *mode)
{
        return kasprintf(GFP_KERNEL, "vfio/%s", dev_name(dev));
}

static struct miscdevice vfio_dev = {
        .minor = VFIO_MINOR,
        .name = "vfio",
        .fops = &vfio_fops,
        .nodename = "vfio/vfio",
        .mode = S_IRUGO | S_IWUGO,
};

static int __init vfio_init(void)
{
        int ret;

        idr_init(&vfio.group_idr);
        mutex_init(&vfio.group_lock);
        mutex_init(&vfio.iommu_drivers_lock);
        INIT_LIST_HEAD(&vfio.group_list);
        INIT_LIST_HEAD(&vfio.iommu_drivers_list);
        init_waitqueue_head(&vfio.release_q);

        ret = misc_register(&vfio_dev);
        if (ret) {
                pr_err("vfio: misc device register failed\n");
                return ret;
        }

        /* /dev/vfio/$GROUP */
        vfio.class = class_create(THIS_MODULE, "vfio");
        if (IS_ERR(vfio.class)) {
                ret = PTR_ERR(vfio.class);
                goto err_class;
        }

        vfio.class->devnode = vfio_devnode;

        /* Group minors are allocated in [0, MINORMASK], inclusive */
        ret = alloc_chrdev_region(&vfio.group_devt, 0, MINORMASK + 1, "vfio");
        if (ret)
                goto err_alloc_chrdev;

        cdev_init(&vfio.group_cdev, &vfio_group_fops);
        ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
        if (ret)
                goto err_cdev_add;

        pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");

        /*
         * Attempt to load known iommu-drivers.  This gives us a working
         * environment without the user needing to explicitly load iommu
         * drivers.
         */
        request_module_nowait("vfio_iommu_type1");
        request_module_nowait("vfio_iommu_spapr_tce");

        return 0;

err_cdev_add:
        unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
        class_destroy(vfio.class);
        vfio.class = NULL;
err_class:
        misc_deregister(&vfio_dev);
        return ret;
}

static void __exit vfio_cleanup(void)
{
        WARN_ON(!list_empty(&vfio.group_list));

        idr_destroy(&vfio.group_idr);
        cdev_del(&vfio.group_cdev);
        unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
        class_destroy(vfio.class);
        vfio.class = NULL;
        misc_deregister(&vfio_dev);
}

module_init(vfio_init);
module_exit(vfio_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_ALIAS_MISCDEV(VFIO_MINOR);
MODULE_ALIAS("devname:vfio/vfio");