qemu/hw/vfio/platform.c
/*
 * vfio based device assignment support - platform devices
 *
 * Copyright Linaro Limited, 2014
 *
 * Authors:
 *  Kim Phillips <kim.phillips@linaro.org>
 *  Eric Auger <eric.auger@linaro.org>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Based on vfio based PCI device assignment support:
 *  Copyright Red Hat, Inc. 2012
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>

#include "hw/vfio/vfio-platform.h"
#include "qemu/error-report.h"
#include "qemu/range.h"
#include "sysemu/sysemu.h"
#include "exec/memory.h"
#include "qemu/queue.h"
#include "hw/sysbus.h"
#include "trace.h"
#include "hw/platform-bus.h"
#include "sysemu/kvm.h"

/*
 * Functions used regardless of the injection method
 */

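/*
 * An IRQ is reported as automasked (VFIO_IRQ_INFO_AUTOMASKED) when the
 * VFIO driver masks it at the time it fires, which is the usual handling
 * for level-sensitive interrupts; such IRQs need the extra unmask
 * (resample) eventfd set up further below.
 */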
static inline bool vfio_irq_is_automasked(VFIOINTp *intp)
{
    return intp->flags & VFIO_IRQ_INFO_AUTOMASKED;
}

/**
 * vfio_init_intp - allocate and initialize an IRQ struct and add it
 * to the device IRQ list
 * @vbasedev: the VFIO device handle
 * @info: irq info struct retrieved from the VFIO driver
 */
static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev,
                                struct vfio_irq_info info)
{
    int ret;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev);
    VFIOINTp *intp;

    intp = g_malloc0(sizeof(*intp));
    intp->vdev = vdev;
    intp->pin = info.index;
    intp->flags = info.flags;
    intp->state = VFIO_IRQ_INACTIVE;
    intp->kvm_accel = false;

    sysbus_init_irq(sbdev, &intp->qemuirq);

    /* Get an eventfd for trigger */
    intp->interrupt = g_malloc0(sizeof(EventNotifier));
    ret = event_notifier_init(intp->interrupt, 0);
    if (ret) {
        g_free(intp->interrupt);
        g_free(intp);
        error_report("vfio: Error: trigger event_notifier_init failed");
        return NULL;
    }
    if (vfio_irq_is_automasked(intp)) {
        /* Get an eventfd for resample/unmask */
        intp->unmask = g_malloc0(sizeof(EventNotifier));
        ret = event_notifier_init(intp->unmask, 0);
        if (ret) {
            g_free(intp->interrupt);
            g_free(intp->unmask);
            g_free(intp);
            error_report("vfio: Error: resamplefd event_notifier_init failed");
            return NULL;
        }
    }

    QLIST_INSERT_HEAD(&vdev->intp_list, intp, next);
    return intp;
}

/**
 * vfio_set_trigger_eventfd - set VFIO eventfd handling
 *
 * @intp: IRQ struct handle
 * @handler: handler to be called on eventfd signaling
 *
 * Set up VFIO signaling and attach an optional user-side handler
 * to the eventfd
 */
static int vfio_set_trigger_eventfd(VFIOINTp *intp,
                                    eventfd_user_side_handler_t handler)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->interrupt);
    qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set trigger eventfd: %m");
        /* don't dereference pfd here: irq_set was just freed */
        qemu_set_fd_handler(event_notifier_get_fd(intp->interrupt),
                            NULL, NULL, NULL);
    }
    return ret;
}
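
/*
 * Note on vfio_set_trigger_eventfd: the eventfd is passed to the kernel
 * as a single int32_t stored in the variable-sized data[] area of struct
 * vfio_irq_set. In the irqfd case (see vfio_start_irqfd_injection) the
 * function is called with a NULL handler: the eventfd is then consumed
 * by KVM and QEMU does not poll it.
 */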

/*
 * Functions only used when eventfds are handled on user-side,
 * i.e. without irqfd
 */
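
/*
 * Slow path life cycle (user-side eventfd handling):
 *
 *   eventfd fires -> vfio_intp_interrupt():
 *       - the IRQ becomes ACTIVE (or PENDING if another IRQ is in flight),
 *       - mmaps are disabled so that MMIO accesses trap,
 *       - the virtual IRQ is asserted and the mmap timer is armed.
 *   first trapped MMIO access -> vfio_platform_eoi():
 *       - the virtual IRQ is deasserted, the physical IRQ unmasked,
 *       - the first PENDING IRQ, if any, is injected.
 *   mmap timer expiry -> vfio_intp_mmap_enable():
 *       - mmaps are re-enabled once no IRQ is ACTIVE anymore.
 */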

/**
 * vfio_mmap_set_enabled - enable/disable the fast path mode
 * @vdev: the VFIO platform device
 * @enabled: the target mmap state
 *
 * enabled = true ~ fast path = MMIO regions are mmapped (no KVM trap);
 * enabled = false ~ slow path = MMIO regions are trapped and region callbacks
 * are called; the slow path allows trapping of the device IRQ status
 * register reset
 */
static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled)
{
    int i;

    for (i = 0; i < vdev->vbasedev.num_regions; i++) {
        vfio_region_mmaps_set_enabled(vdev->regions[i], enabled);
    }
}

/**
 * vfio_intp_mmap_enable - timer function, restores the fast path
 * if there is no more active IRQ
 * @opaque: actually points to the VFIO platform device
 *
 * Called on mmap timer timeout, this function checks whether the
 * IRQ is still active and, if not, restores the fast path.
 * By construction a single eventfd is handled at a time.
 * If the IRQ is still active, the timer is reprogrammed.
 */
static void vfio_intp_mmap_enable(void *opaque)
{
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque;

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(tmp, &vdev->intp_list, next) {
        if (tmp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_intp_mmap_enable(tmp->pin);
            /* re-program the timer to check active status later */
            timer_mod(vdev->mmap_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                          vdev->mmap_timeout);
            qemu_mutex_unlock(&vdev->intp_mutex);
            return;
        }
    }
    vfio_mmap_set_enabled(vdev, true);
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_intp_inject_pending_lockheld - injects a pending IRQ
 * @intp: the pending IRQ struct handle
 *
 * The function is called on a previous IRQ completion, from
 * vfio_platform_eoi, while the intp_mutex is locked.
 * In that situation, the slow path is already set and
 * the mmap timer was already programmed.
 */
static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp)
{
    trace_vfio_platform_intp_inject_pending_lockheld(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    intp->state = VFIO_IRQ_ACTIVE;

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);
}

/**
 * vfio_intp_interrupt - the user-side eventfd handler
 * @intp: the IRQ struct handle
 *
 * The function is entered in event handler context:
 * the vIRQ is injected into the guest if there is no other active
 * or pending IRQ.
 */
static void vfio_intp_interrupt(VFIOINTp *intp)
{
    int ret;
    VFIOINTp *tmp;
    VFIOPlatformDevice *vdev = intp->vdev;
    bool delay_handling = false;

    qemu_mutex_lock(&vdev->intp_mutex);
    if (intp->state == VFIO_IRQ_INACTIVE) {
        QLIST_FOREACH(tmp, &vdev->intp_list, next) {
            if (tmp->state == VFIO_IRQ_ACTIVE ||
                tmp->state == VFIO_IRQ_PENDING) {
                delay_handling = true;
                break;
            }
        }
    }
    if (delay_handling) {
        /*
         * the new IRQ gets a pending status and is pushed into
         * the pending queue
         */
        intp->state = VFIO_IRQ_PENDING;
        trace_vfio_intp_interrupt_set_pending(intp->pin);
        QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue,
                             intp, pqnext);
        ret = event_notifier_test_and_clear(intp->interrupt);
        qemu_mutex_unlock(&vdev->intp_mutex);
        return;
    }

    trace_vfio_platform_intp_interrupt(intp->pin,
                              event_notifier_get_fd(intp->interrupt));

    ret = event_notifier_test_and_clear(intp->interrupt);
    if (!ret) {
        error_report("Error when clearing fd=%d (ret = %d)",
                     event_notifier_get_fd(intp->interrupt), ret);
    }

    intp->state = VFIO_IRQ_ACTIVE;

    /* set the slow path */
    vfio_mmap_set_enabled(vdev, false);

    /* trigger the virtual IRQ */
    qemu_set_irq(intp->qemuirq, 1);

    /*
     * Schedule the mmap timer which will restore the fast path when no IRQ
     * is active anymore
     */
    if (vdev->mmap_timeout) {
        timer_mod(vdev->mmap_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      vdev->mmap_timeout);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_platform_eoi - IRQ completion routine
 * @vbasedev: the VFIO device handle
 *
 * De-asserts the active virtual IRQ and unmasks the physical IRQ
 * (effective for level-sensitive IRQs auto-masked by the VFIO driver).
 * Then it handles the next pending IRQ, if any.
 * The eoi function is called on the first access to any MMIO region
 * after an IRQ was triggered; that access is trapped since the slow
 * path was set. It is assumed this access corresponds to the IRQ
 * status register reset. With such a mechanism, a single IRQ can be
 * handled at a time since there is no way to know which IRQ
 * was completed by the guest (we would need additional details
 * about the IRQ status register mask).
 */
static void vfio_platform_eoi(VFIODevice *vbasedev)
{
    VFIOINTp *intp;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    qemu_mutex_lock(&vdev->intp_mutex);
    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->state == VFIO_IRQ_ACTIVE) {
            trace_vfio_platform_eoi(intp->pin,
                                event_notifier_get_fd(intp->interrupt));
            intp->state = VFIO_IRQ_INACTIVE;

            /* deassert the virtual IRQ */
            qemu_set_irq(intp->qemuirq, 0);

            if (vfio_irq_is_automasked(intp)) {
                /* unmask the physical level-sensitive IRQ */
                vfio_unmask_single_irqindex(vbasedev, intp->pin);
            }

            /* a single IRQ can be active at a time */
            break;
        }
    }
    /* in case there are pending IRQs, handle the first one */
    if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) {
        intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue);
        vfio_intp_inject_pending_lockheld(intp);
        QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext);
    }
    qemu_mutex_unlock(&vdev->intp_mutex);
}

/**
 * vfio_start_eventfd_injection - starts the virtual IRQ injection using
 * user-side handled eventfds
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 */
static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    int ret;
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt);
    if (ret) {
        error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                     intp->pin);
        abort();
    }
}

/*
 * Functions used for irqfd
 */
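
/*
 * With irqfd, the trigger eventfd is consumed directly by KVM: no
 * userspace handler runs on the IRQ path. For automasked (typically
 * level-sensitive) IRQs, KVM signals the resample eventfd when the guest
 * completes the virtual IRQ at the irqchip; the VFIO driver, programmed
 * by vfio_set_resample_eventfd below, then unmasks the physical IRQ.
 */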

/**
 * vfio_set_resample_eventfd - sets the resamplefd for an IRQ
 * @intp: the IRQ struct handle
 *
 * Programs the VFIO driver to unmask this IRQ when the
 * intp->unmask eventfd is triggered
 */
static int vfio_set_resample_eventfd(VFIOINTp *intp)
{
    VFIODevice *vbasedev = &intp->vdev->vbasedev;
    struct vfio_irq_set *irq_set;
    int argsz, ret;
    int32_t *pfd;

    argsz = sizeof(*irq_set) + sizeof(*pfd);
    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
    irq_set->index = intp->pin;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = event_notifier_get_fd(intp->unmask);
    qemu_set_fd_handler(*pfd, NULL, NULL, NULL);
    ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set);
    g_free(irq_set);
    if (ret < 0) {
        error_report("vfio: Failed to set resample eventfd: %m");
    }
    return ret;
}

/**
 * vfio_start_irqfd_injection - starts the virtual IRQ injection using
 * irqfd
 *
 * @sbdev: the sysbus device handle
 * @irq: the qemu irq handle
 *
 * If the irqfd setup fails, we fall back to user-side handled eventfds.
 */
static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev);
    VFIOINTp *intp;

    if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() ||
        !vdev->irqfd_allowed) {
        goto fail_irqfd;
    }

    QLIST_FOREACH(intp, &vdev->intp_list, next) {
        if (intp->qemuirq == irq) {
            break;
        }
    }
    assert(intp);

    if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt,
                                       intp->unmask, irq) < 0) {
        goto fail_irqfd;
    }

    if (vfio_set_trigger_eventfd(intp, NULL) < 0) {
        goto fail_vfio;
    }
    if (vfio_irq_is_automasked(intp)) {
        if (vfio_set_resample_eventfd(intp) < 0) {
            goto fail_vfio;
        }
        trace_vfio_platform_start_level_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt),
                                    event_notifier_get_fd(intp->unmask));
    } else {
        trace_vfio_platform_start_edge_irqfd_injection(intp->pin,
                                    event_notifier_get_fd(intp->interrupt));
    }

    intp->kvm_accel = true;

    return;
fail_vfio:
    kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq);
    error_report("vfio: failed to start eventfd signaling for IRQ %d: %m",
                 intp->pin);
    abort();
fail_irqfd:
    vfio_start_eventfd_injection(sbdev, irq);
    return;
}

/* VFIO skeleton */

static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev)
{
    vbasedev->needs_reset = true;
}

/* not implemented yet */
static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev)
{
    return -1;
}

/**
 * vfio_populate_device - allocate and populate MMIO region
 * and IRQ structs according to the information returned by the driver
 * @vbasedev: the VFIO device handle
 */
static int vfio_populate_device(VFIODevice *vbasedev)
{
    VFIOINTp *intp, *tmp;
    int i, ret = -1;
    VFIOPlatformDevice *vdev =
        container_of(vbasedev, VFIOPlatformDevice, vbasedev);

    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) {
        error_report("vfio: Um, this isn't a platform device");
        return ret;
    }

    vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions);

    for (i = 0; i < vbasedev->num_regions; i++) {
        char *name = g_strdup_printf("VFIO %s region %d", vbasedev->name, i);

        vdev->regions[i] = g_new0(VFIORegion, 1);
        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
                                vdev->regions[i], i, name);
        g_free(name);
        if (ret) {
            error_report("vfio: Error getting region %d info: %m", i);
            goto reg_error;
        }
    }

    vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                    vfio_intp_mmap_enable, vdev);

    QSIMPLEQ_INIT(&vdev->pending_intp_queue);

    for (i = 0; i < vbasedev->num_irqs; i++) {
        struct vfio_irq_info irq = { .argsz = sizeof(irq) };

        irq.index = i;
        ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
        if (ret) {
            error_report("vfio: error getting device %s irq info",
                         vbasedev->name);
            goto irq_err;
        } else {
            trace_vfio_platform_populate_interrupts(irq.index,
                                                    irq.count,
                                                    irq.flags);
            intp = vfio_init_intp(vbasedev, irq);
            if (!intp) {
                error_report("vfio: Error installing IRQ %d", i);
                goto irq_err;
            }
        }
    }
    return 0;
irq_err:
    timer_del(vdev->mmap_timer);
    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
        QLIST_REMOVE(intp, next);
        g_free(intp);
    }
reg_error:
    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vdev->regions[i]) {
            vfio_region_finalize(vdev->regions[i]);
        }
        g_free(vdev->regions[i]);
    }
    g_free(vdev->regions);
    return ret;
}

/* specialized functions for VFIO Platform devices */
static VFIODeviceOps vfio_platform_ops = {
    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
    .vfio_eoi = vfio_platform_eoi,
};

/**
 * vfio_base_device_init - perform preliminary VFIO setup
 * @vbasedev: the VFIO device handle
 *
 * Implement the VFIO command sequence that allows discovering the
 * assigned device resources: group extraction, device
 * fd retrieval, resource query.
 * Precondition: the device name must be initialized
 */
static int vfio_base_device_init(VFIODevice *vbasedev)
{
    VFIOGroup *group;
    VFIODevice *vbasedev_iter;
    char *tmp, group_path[PATH_MAX], *group_name;
    ssize_t len;
    struct stat st;
    int groupid;
    int ret;

    /* @sysfsdev takes precedence over @host */
    if (vbasedev->sysfsdev) {
        g_free(vbasedev->name);
        vbasedev->name = g_strdup(basename(vbasedev->sysfsdev));
    } else {
        if (!vbasedev->name || strchr(vbasedev->name, '/')) {
            return -EINVAL;
        }

        vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s",
                                             vbasedev->name);
    }

    if (stat(vbasedev->sysfsdev, &st) < 0) {
        error_report("vfio: error: no such host device: %s",
                     vbasedev->sysfsdev);
        return -errno;
    }

    tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev);
    len = readlink(tmp, group_path, sizeof(group_path));
    g_free(tmp);

    if (len < 0 || len >= sizeof(group_path)) {
        error_report("vfio: error: no iommu_group for device");
        return len < 0 ? -errno : -ENAMETOOLONG;
    }

    group_path[len] = 0;

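    /*
     * group_path now holds the iommu_group symlink target, e.g.
     * "/sys/kernel/iommu_groups/3" (group number purely illustrative);
     * its basename is the IOMMU group id used below.
     */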
    group_name = basename(group_path);
    if (sscanf(group_name, "%d", &groupid) != 1) {
        error_report("vfio: error reading %s: %m", group_path);
        return -errno;
    }

    trace_vfio_platform_base_device_init(vbasedev->name, groupid);

    group = vfio_get_group(groupid, &address_space_memory);
    if (!group) {
        error_report("vfio: failed to get group %d", groupid);
        return -ENOENT;
    }

    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
            error_report("vfio: error: device %s is already attached",
                         vbasedev->name);
            vfio_put_group(group);
            return -EBUSY;
        }
    }
    ret = vfio_get_device(group, vbasedev->name, vbasedev);
    if (ret) {
        error_report("vfio: failed to get device %s", vbasedev->name);
        vfio_put_group(group);
        return ret;
    }

    ret = vfio_populate_device(vbasedev);
    if (ret) {
        error_report("vfio: failed to populate device %s", vbasedev->name);
        vfio_put_group(group);
    }

    return ret;
}

/**
 * vfio_platform_realize - the device realize function
 * @dev: device state pointer
 * @errp: error
 *
 * Initialize the device, its memory regions and IRQ structures.
 * IRQs are started separately.
 */
static void vfio_platform_realize(DeviceState *dev, Error **errp)
{
    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
    VFIODevice *vbasedev = &vdev->vbasedev;
    int i, ret;

    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
    vbasedev->ops = &vfio_platform_ops;

    trace_vfio_platform_realize(vbasedev->sysfsdev ?
                                vbasedev->sysfsdev : vbasedev->name,
                                vdev->compat);

    ret = vfio_base_device_init(vbasedev);
    if (ret) {
        error_setg(errp, "vfio: vfio_base_device_init failed for %s",
                   vbasedev->name);
        return;
    }

    for (i = 0; i < vbasedev->num_regions; i++) {
        if (vfio_region_mmap(vdev->regions[i])) {
            error_report("%s mmap unsupported. Performance may be slow",
                         memory_region_name(vdev->regions[i]->mem));
        }
        sysbus_init_mmio(sbdev, vdev->regions[i]->mem);
    }
}

static const VMStateDescription vfio_platform_vmstate = {
    .name = TYPE_VFIO_PLATFORM,
    .unmigratable = 1,
};

static Property vfio_platform_dev_properties[] = {
    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
    DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev),
    DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false),
    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
                       mmap_timeout, 1100),
    DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
    DEFINE_PROP_END_OF_LIST(),
};
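
/*
 * The base type defined below is abstract; concrete subclasses such as
 * vfio-calxeda-xgmac instantiate it. A hypothetical command line, assuming
 * the host device fff51000.ethernet has already been bound to the
 * vfio-platform host driver:
 *
 *   -device vfio-calxeda-xgmac,host=fff51000.ethernet,mmap-timeout-ms=1100
 */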

static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);

    dc->realize = vfio_platform_realize;
    dc->props = vfio_platform_dev_properties;
    dc->vmsd = &vfio_platform_vmstate;
    dc->desc = "VFIO-based platform device assignment";
    sbc->connect_irq_notifier = vfio_start_irqfd_injection;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static const TypeInfo vfio_platform_dev_info = {
    .name = TYPE_VFIO_PLATFORM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(VFIOPlatformDevice),
    .class_init = vfio_platform_class_init,
    .class_size = sizeof(VFIOPlatformDeviceClass),
    .abstract   = true,
};

static void register_vfio_platform_dev_type(void)
{
    type_register_static(&vfio_platform_dev_info);
}

type_init(register_vfio_platform_dev_type)