qemu/hw/vfio/pci.c
   1/*
   2 * vfio based device assignment support
   3 *
   4 * Copyright Red Hat, Inc. 2012
   5 *
   6 * Authors:
   7 *  Alex Williamson <alex.williamson@redhat.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 * Based on qemu-kvm device-assignment:
  13 *  Adapted for KVM by Qumranet.
  14 *  Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com)
  15 *  Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com)
  16 *  Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com)
  17 *  Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com)
  18 *  Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com)
  19 */
  20
  21#include "qemu/osdep.h"
  22#include <linux/vfio.h>
  23#include <sys/ioctl.h>
  24
  25#include "hw/pci/msi.h"
  26#include "hw/pci/msix.h"
  27#include "hw/pci/pci_bridge.h"
  28#include "qemu/error-report.h"
  29#include "qemu/option.h"
  30#include "qemu/range.h"
  31#include "sysemu/kvm.h"
  32#include "sysemu/sysemu.h"
  33#include "pci.h"
  34#include "trace.h"
  35#include "qapi/error.h"
  36
  37#define MSIX_CAP_LENGTH 12
  38
  39static void vfio_disable_interrupts(VFIOPCIDevice *vdev);
  40static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled);
  41
  42/*
   43 * Disabling BAR mmapping can be slow, but toggling it around INTx can
  44 * also be a huge overhead.  We try to get the best of both worlds by
  45 * waiting until an interrupt to disable mmaps (subsequent transitions
  46 * to the same state are effectively no overhead).  If the interrupt has
  47 * been serviced and the time gap is long enough, we re-enable mmaps for
  48 * performance.  This works well for things like graphics cards, which
  49 * may not use their interrupt at all and are penalized to an unusable
  50 * level by read/write BAR traps.  Other devices, like NICs, have more
  51 * regular interrupts and see much better latency by staying in non-mmap
  52 * mode.  We therefore set the default mmap_timeout such that a ping
  53 * is just enough to keep the mmap disabled.  Users can experiment with
  54 * other options with the x-intx-mmap-timeout-ms parameter (a value of
  55 * zero disables the timer).
  56 */
  57static void vfio_intx_mmap_enable(void *opaque)
  58{
  59    VFIOPCIDevice *vdev = opaque;
  60
  61    if (vdev->intx.pending) {
  62        timer_mod(vdev->intx.mmap_timer,
  63                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
  64        return;
  65    }
  66
  67    vfio_mmap_set_enabled(vdev, true);
  68}
  69
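     /*
      * Userspace INTx handler: called when the interrupt eventfd fires and
      * KVM bypass is not in use.  Assert the guest IRQ and switch BAR
      * accesses to trap mode (mmap disabled) until the interrupt has been
      * serviced, as described in the comment above.
      */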
  70static void vfio_intx_interrupt(void *opaque)
  71{
  72    VFIOPCIDevice *vdev = opaque;
  73
  74    if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) {
  75        return;
  76    }
  77
  78    trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin);
  79
  80    vdev->intx.pending = true;
  81    pci_irq_assert(&vdev->pdev);
  82    vfio_mmap_set_enabled(vdev, false);
  83    if (vdev->intx.mmap_timeout) {
  84        timer_mod(vdev->intx.mmap_timer,
  85                       qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + vdev->intx.mmap_timeout);
  86    }
  87}
  88
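     /*
      * EOI for the userspace INTx path: the guest has serviced the
      * interrupt, so de-assert the virtual IRQ and unmask the physical
      * interrupt so the device can trigger again.
      */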
  89static void vfio_intx_eoi(VFIODevice *vbasedev)
  90{
  91    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
  92
  93    if (!vdev->intx.pending) {
  94        return;
  95    }
  96
  97    trace_vfio_intx_eoi(vbasedev->name);
  98
  99    vdev->intx.pending = false;
 100    pci_irq_deassert(&vdev->pdev);
 101    vfio_unmask_single_irqindex(vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 102}
 103
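     /*
      * Accelerate INTx with a KVM irqfd/resamplefd pair: KVM injects the
      * interrupt directly from the trigger eventfd, and VFIO unmasks the
      * device when KVM signals the resample eventfd on guest EOI, so the
      * whole inject/EOI/unmask cycle bypasses QEMU.
      */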
 104static void vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp)
 105{
 106#ifdef CONFIG_KVM
 107    struct kvm_irqfd irqfd = {
 108        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
 109        .gsi = vdev->intx.route.irq,
 110        .flags = KVM_IRQFD_FLAG_RESAMPLE,
 111    };
 112    struct vfio_irq_set *irq_set;
 113    int ret, argsz;
 114    int32_t *pfd;
 115
 116    if (vdev->no_kvm_intx || !kvm_irqfds_enabled() ||
 117        vdev->intx.route.mode != PCI_INTX_ENABLED ||
 118        !kvm_resamplefds_enabled()) {
 119        return;
 120    }
 121
 122    /* Get to a known interrupt state */
 123    qemu_set_fd_handler(irqfd.fd, NULL, NULL, vdev);
 124    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 125    vdev->intx.pending = false;
 126    pci_irq_deassert(&vdev->pdev);
 127
 128    /* Get an eventfd for resample/unmask */
 129    if (event_notifier_init(&vdev->intx.unmask, 0)) {
 130        error_setg(errp, "event_notifier_init failed eoi");
 131        goto fail;
 132    }
 133
 134    /* KVM triggers it, VFIO listens for it */
 135    irqfd.resamplefd = event_notifier_get_fd(&vdev->intx.unmask);
 136
 137    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
 138        error_setg_errno(errp, errno, "failed to setup resample irqfd");
 139        goto fail_irqfd;
 140    }
 141
 142    argsz = sizeof(*irq_set) + sizeof(*pfd);
 143
 144    irq_set = g_malloc0(argsz);
 145    irq_set->argsz = argsz;
 146    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_UNMASK;
 147    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
 148    irq_set->start = 0;
 149    irq_set->count = 1;
 150    pfd = (int32_t *)&irq_set->data;
 151
 152    *pfd = irqfd.resamplefd;
 153
 154    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
 155    g_free(irq_set);
 156    if (ret) {
 157        error_setg_errno(errp, -ret, "failed to setup INTx unmask fd");
 158        goto fail_vfio;
 159    }
 160
 161    /* Let'em rip */
 162    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 163
 164    vdev->intx.kvm_accel = true;
 165
 166    trace_vfio_intx_enable_kvm(vdev->vbasedev.name);
 167
 168    return;
 169
 170fail_vfio:
 171    irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 172    kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd);
 173fail_irqfd:
 174    event_notifier_cleanup(&vdev->intx.unmask);
 175fail:
 176    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
 177    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 178#endif
 179}
 180
 181static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev)
 182{
 183#ifdef CONFIG_KVM
 184    struct kvm_irqfd irqfd = {
 185        .fd = event_notifier_get_fd(&vdev->intx.interrupt),
 186        .gsi = vdev->intx.route.irq,
 187        .flags = KVM_IRQFD_FLAG_DEASSIGN,
 188    };
 189
 190    if (!vdev->intx.kvm_accel) {
 191        return;
 192    }
 193
 194    /*
 195     * Get to a known state, hardware masked, QEMU ready to accept new
 196     * interrupts, QEMU IRQ de-asserted.
 197     */
 198    vfio_mask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 199    vdev->intx.pending = false;
 200    pci_irq_deassert(&vdev->pdev);
 201
 202    /* Tell KVM to stop listening for an INTx irqfd */
 203    if (kvm_vm_ioctl(kvm_state, KVM_IRQFD, &irqfd)) {
 204        error_report("vfio: Error: Failed to disable INTx irqfd: %m");
 205    }
 206
  207    /* We only need to close the eventfd for VFIO to clean up the kernel side */
 208    event_notifier_cleanup(&vdev->intx.unmask);
 209
 210    /* QEMU starts listening for interrupt events. */
 211    qemu_set_fd_handler(irqfd.fd, vfio_intx_interrupt, NULL, vdev);
 212
 213    vdev->intx.kvm_accel = false;
 214
 215    /* If we've missed an event, let it re-fire through QEMU */
 216    vfio_unmask_single_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 217
 218    trace_vfio_intx_disable_kvm(vdev->vbasedev.name);
 219#endif
 220}
 221
 222static void vfio_intx_update(PCIDevice *pdev)
 223{
 224    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
 225    PCIINTxRoute route;
 226    Error *err = NULL;
 227
 228    if (vdev->interrupt != VFIO_INT_INTx) {
 229        return;
 230    }
 231
 232    route = pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin);
 233
 234    if (!pci_intx_route_changed(&vdev->intx.route, &route)) {
 235        return; /* Nothing changed */
 236    }
 237
 238    trace_vfio_intx_update(vdev->vbasedev.name,
 239                           vdev->intx.route.irq, route.irq);
 240
 241    vfio_intx_disable_kvm(vdev);
 242
 243    vdev->intx.route = route;
 244
 245    if (route.mode != PCI_INTX_ENABLED) {
 246        return;
 247    }
 248
 249    vfio_intx_enable_kvm(vdev, &err);
 250    if (err) {
 251        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
 252    }
 253
  254    /* Re-enable the interrupt in case we missed an EOI */
 255    vfio_intx_eoi(&vdev->vbasedev);
 256}
 257
 258static int vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp)
 259{
 260    uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1);
 261    int ret, argsz, retval = 0;
 262    struct vfio_irq_set *irq_set;
 263    int32_t *pfd;
 264    Error *err = NULL;
 265
 266    if (!pin) {
 267        return 0;
 268    }
 269
 270    vfio_disable_interrupts(vdev);
 271
 272    vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */
 273    pci_config_set_interrupt_pin(vdev->pdev.config, pin);
 274
 275#ifdef CONFIG_KVM
 276    /*
 277     * Only conditional to avoid generating error messages on platforms
 278     * where we won't actually use the result anyway.
 279     */
 280    if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) {
 281        vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev,
 282                                                        vdev->intx.pin);
 283    }
 284#endif
 285
 286    ret = event_notifier_init(&vdev->intx.interrupt, 0);
 287    if (ret) {
 288        error_setg_errno(errp, -ret, "event_notifier_init failed");
 289        return ret;
 290    }
 291
 292    argsz = sizeof(*irq_set) + sizeof(*pfd);
 293
 294    irq_set = g_malloc0(argsz);
 295    irq_set->argsz = argsz;
 296    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 297    irq_set->index = VFIO_PCI_INTX_IRQ_INDEX;
 298    irq_set->start = 0;
 299    irq_set->count = 1;
 300    pfd = (int32_t *)&irq_set->data;
 301
 302    *pfd = event_notifier_get_fd(&vdev->intx.interrupt);
 303    qemu_set_fd_handler(*pfd, vfio_intx_interrupt, NULL, vdev);
 304
 305    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
 306    if (ret) {
 307        error_setg_errno(errp, -ret, "failed to setup INTx fd");
 308        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
 309        event_notifier_cleanup(&vdev->intx.interrupt);
 310        retval = -errno;
 311        goto cleanup;
 312    }
 313
 314    vfio_intx_enable_kvm(vdev, &err);
 315    if (err) {
 316        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
 317    }
 318
 319    vdev->interrupt = VFIO_INT_INTx;
 320
 321    trace_vfio_intx_enable(vdev->vbasedev.name);
 322
 323cleanup:
 324    g_free(irq_set);
 325
 326    return retval;
 327}
 328
 329static void vfio_intx_disable(VFIOPCIDevice *vdev)
 330{
 331    int fd;
 332
 333    timer_del(vdev->intx.mmap_timer);
 334    vfio_intx_disable_kvm(vdev);
 335    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX);
 336    vdev->intx.pending = false;
 337    pci_irq_deassert(&vdev->pdev);
 338    vfio_mmap_set_enabled(vdev, true);
 339
 340    fd = event_notifier_get_fd(&vdev->intx.interrupt);
 341    qemu_set_fd_handler(fd, NULL, NULL, vdev);
 342    event_notifier_cleanup(&vdev->intx.interrupt);
 343
 344    vdev->interrupt = VFIO_INT_NONE;
 345
 346    trace_vfio_intx_disable(vdev->vbasedev.name);
 347}
 348
 349/*
 350 * MSI/X
 351 */
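     /*
      * Userspace fallback handler for an MSI/MSI-X vector's eventfd, used
      * when the vector has no KVM route or (for MSI-X) is masked: fetch the
      * current message from the emulated capability and inject it through
      * the MSI/MSI-X core.
      */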
 352static void vfio_msi_interrupt(void *opaque)
 353{
 354    VFIOMSIVector *vector = opaque;
 355    VFIOPCIDevice *vdev = vector->vdev;
 356    MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector);
 357    void (*notify)(PCIDevice *dev, unsigned vector);
 358    MSIMessage msg;
 359    int nr = vector - vdev->msi_vectors;
 360
 361    if (!event_notifier_test_and_clear(&vector->interrupt)) {
 362        return;
 363    }
 364
 365    if (vdev->interrupt == VFIO_INT_MSIX) {
 366        get_msg = msix_get_message;
 367        notify = msix_notify;
 368
 369        /* A masked vector firing needs to use the PBA, enable it */
 370        if (msix_is_masked(&vdev->pdev, nr)) {
 371            set_bit(nr, vdev->msix->pending);
 372            memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true);
 373            trace_vfio_msix_pba_enable(vdev->vbasedev.name);
 374        }
 375    } else if (vdev->interrupt == VFIO_INT_MSI) {
 376        get_msg = msi_get_message;
 377        notify = msi_notify;
 378    } else {
 379        abort();
 380    }
 381
 382    msg = get_msg(&vdev->pdev, nr);
 383    trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data);
 384    notify(&vdev->pdev, nr);
 385}
 386
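     /*
      * Program every currently allocated vector with a single
      * VFIO_DEVICE_SET_IRQS call.  The payload appended to struct
      * vfio_irq_set is an array of eventfds, one per vector, with -1 for
      * vectors that should be left unconfigured, e.g. for three vectors:
      *
      *   argsz = sizeof(struct vfio_irq_set) + 3 * sizeof(int32_t);
      *   fds[] = { fd_for_vector0, -1, fd_for_vector2 };
      */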
 387static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix)
 388{
 389    struct vfio_irq_set *irq_set;
 390    int ret = 0, i, argsz;
 391    int32_t *fds;
 392
 393    argsz = sizeof(*irq_set) + (vdev->nr_vectors * sizeof(*fds));
 394
 395    irq_set = g_malloc0(argsz);
 396    irq_set->argsz = argsz;
 397    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
 398    irq_set->index = msix ? VFIO_PCI_MSIX_IRQ_INDEX : VFIO_PCI_MSI_IRQ_INDEX;
 399    irq_set->start = 0;
 400    irq_set->count = vdev->nr_vectors;
 401    fds = (int32_t *)&irq_set->data;
 402
 403    for (i = 0; i < vdev->nr_vectors; i++) {
 404        int fd = -1;
 405
 406        /*
 407         * MSI vs MSI-X - The guest has direct access to MSI mask and pending
  408         * bits, so we always use the KVM signaling path once it is set up.
 409         * MSI-X mask and pending bits are emulated, so we want to use the
 410         * KVM signaling path only when configured and unmasked.
 411         */
 412        if (vdev->msi_vectors[i].use) {
 413            if (vdev->msi_vectors[i].virq < 0 ||
 414                (msix && msix_is_masked(&vdev->pdev, i))) {
 415                fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt);
 416            } else {
 417                fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt);
 418            }
 419        }
 420
 421        fds[i] = fd;
 422    }
 423
 424    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
 425
 426    g_free(irq_set);
 427
 428    return ret;
 429}
 430
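     /*
      * Try to set up a KVM bypass route for one MSI/MSI-X vector: allocate a
      * KVM MSI route, then bind the dedicated kvm_interrupt eventfd to it as
      * an irqfd.  On any failure we simply return and the vector keeps using
      * the QEMU-handled eventfd.
      */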
 431static void vfio_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector,
 432                                  int vector_n, bool msix)
 433{
 434    int virq;
 435
 436    if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) {
 437        return;
 438    }
 439
 440    if (event_notifier_init(&vector->kvm_interrupt, 0)) {
 441        return;
 442    }
 443
 444    virq = kvm_irqchip_add_msi_route(kvm_state, vector_n, &vdev->pdev);
 445    if (virq < 0) {
 446        event_notifier_cleanup(&vector->kvm_interrupt);
 447        return;
 448    }
 449
 450    if (kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
 451                                       NULL, virq) < 0) {
 452        kvm_irqchip_release_virq(kvm_state, virq);
 453        event_notifier_cleanup(&vector->kvm_interrupt);
 454        return;
 455    }
 456
 457    vector->virq = virq;
 458}
 459
 460static void vfio_remove_kvm_msi_virq(VFIOMSIVector *vector)
 461{
 462    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt,
 463                                          vector->virq);
 464    kvm_irqchip_release_virq(kvm_state, vector->virq);
 465    vector->virq = -1;
 466    event_notifier_cleanup(&vector->kvm_interrupt);
 467}
 468
 469static void vfio_update_kvm_msi_virq(VFIOMSIVector *vector, MSIMessage msg,
 470                                     PCIDevice *pdev)
 471{
 472    kvm_irqchip_update_msi_route(kvm_state, vector->virq, msg, pdev);
 473    kvm_irqchip_commit_routes(kvm_state);
 474}
 475
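     /*
      * Called by the MSI-X core when the guest starts using vector nr (or by
      * vfio_msix_enable() with msg == NULL): allocate the eventfd on first
      * use, prefer a KVM route when a message is available, and program the
      * new eventfd into the device, growing the enabled vector count if
      * needed.
      */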
 476static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
 477                                   MSIMessage *msg, IOHandler *handler)
 478{
 479    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
 480    VFIOMSIVector *vector;
 481    int ret;
 482
 483    trace_vfio_msix_vector_do_use(vdev->vbasedev.name, nr);
 484
 485    vector = &vdev->msi_vectors[nr];
 486
 487    if (!vector->use) {
 488        vector->vdev = vdev;
 489        vector->virq = -1;
 490        if (event_notifier_init(&vector->interrupt, 0)) {
 491            error_report("vfio: Error: event_notifier_init failed");
 492        }
 493        vector->use = true;
 494        msix_vector_use(pdev, nr);
 495    }
 496
 497    qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
 498                        handler, NULL, vector);
 499
 500    /*
 501     * Attempt to enable route through KVM irqchip,
 502     * default to userspace handling if unavailable.
 503     */
 504    if (vector->virq >= 0) {
 505        if (!msg) {
 506            vfio_remove_kvm_msi_virq(vector);
 507        } else {
 508            vfio_update_kvm_msi_virq(vector, *msg, pdev);
 509        }
 510    } else {
 511        if (msg) {
 512            vfio_add_kvm_msi_virq(vdev, vector, nr, true);
 513        }
 514    }
 515
 516    /*
 517     * We don't want to have the host allocate all possible MSI vectors
  518     * for a device if they're not in use, so we disable them all and
  519     * incrementally re-enable more as needed.
 520     */
 521    if (vdev->nr_vectors < nr + 1) {
 522        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
 523        vdev->nr_vectors = nr + 1;
 524        ret = vfio_enable_vectors(vdev, true);
 525        if (ret) {
 526            error_report("vfio: failed to enable vectors, %d", ret);
 527        }
 528    } else {
 529        int argsz;
 530        struct vfio_irq_set *irq_set;
 531        int32_t *pfd;
 532
 533        argsz = sizeof(*irq_set) + sizeof(*pfd);
 534
 535        irq_set = g_malloc0(argsz);
 536        irq_set->argsz = argsz;
 537        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 538                         VFIO_IRQ_SET_ACTION_TRIGGER;
 539        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 540        irq_set->start = nr;
 541        irq_set->count = 1;
 542        pfd = (int32_t *)&irq_set->data;
 543
 544        if (vector->virq >= 0) {
 545            *pfd = event_notifier_get_fd(&vector->kvm_interrupt);
 546        } else {
 547            *pfd = event_notifier_get_fd(&vector->interrupt);
 548        }
 549
 550        ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
 551        g_free(irq_set);
 552        if (ret) {
 553            error_report("vfio: failed to modify vector, %d", ret);
 554        }
 555    }
 556
 557    /* Disable PBA emulation when nothing more is pending. */
 558    clear_bit(nr, vdev->msix->pending);
 559    if (find_first_bit(vdev->msix->pending,
 560                       vdev->nr_vectors) == vdev->nr_vectors) {
 561        memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
 562        trace_vfio_msix_pba_disable(vdev->vbasedev.name);
 563    }
 564
 565    return 0;
 566}
 567
 568static int vfio_msix_vector_use(PCIDevice *pdev,
 569                                unsigned int nr, MSIMessage msg)
 570{
 571    return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
 572}
 573
 574static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
 575{
 576    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
 577    VFIOMSIVector *vector = &vdev->msi_vectors[nr];
 578
 579    trace_vfio_msix_vector_release(vdev->vbasedev.name, nr);
 580
 581    /*
 582     * There are still old guests that mask and unmask vectors on every
 583     * interrupt.  If we're using QEMU bypass with a KVM irqfd, leave all of
 584     * the KVM setup in place, simply switch VFIO to use the non-bypass
 585     * eventfd.  We'll then fire the interrupt through QEMU and the MSI-X
 586     * core will mask the interrupt and set pending bits, allowing it to
 587     * be re-asserted on unmask.  Nothing to do if already using QEMU mode.
 588     */
 589    if (vector->virq >= 0) {
 590        int argsz;
 591        struct vfio_irq_set *irq_set;
 592        int32_t *pfd;
 593
 594        argsz = sizeof(*irq_set) + sizeof(*pfd);
 595
 596        irq_set = g_malloc0(argsz);
 597        irq_set->argsz = argsz;
 598        irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 599                         VFIO_IRQ_SET_ACTION_TRIGGER;
 600        irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
 601        irq_set->start = nr;
 602        irq_set->count = 1;
 603        pfd = (int32_t *)&irq_set->data;
 604
 605        *pfd = event_notifier_get_fd(&vector->interrupt);
 606
 607        ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
 608
 609        g_free(irq_set);
 610    }
 611}
 612
 613static void vfio_msix_enable(VFIOPCIDevice *vdev)
 614{
 615    vfio_disable_interrupts(vdev);
 616
 617    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->msix->entries);
 618
 619    vdev->interrupt = VFIO_INT_MSIX;
 620
 621    /*
 622     * Some communication channels between VF & PF or PF & fw rely on the
 623     * physical state of the device and expect that enabling MSI-X from the
 624     * guest enables the same on the host.  When our guest is Linux, the
 625     * guest driver call to pci_enable_msix() sets the enabling bit in the
 626     * MSI-X capability, but leaves the vector table masked.  We therefore
 627     * can't rely on a vector_use callback (from request_irq() in the guest)
 628     * to switch the physical device into MSI-X mode because that may come a
 629     * long time after pci_enable_msix().  This code enables vector 0 with
  630     * triggering to userspace, then immediately releases the vector, leaving
 631     * the physical device with no vectors enabled, but MSI-X enabled, just
 632     * like the guest view.
 633     */
 634    vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
 635    vfio_msix_vector_release(&vdev->pdev, 0);
 636
 637    if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
 638                                  vfio_msix_vector_release, NULL)) {
 639        error_report("vfio: msix_set_vector_notifiers failed");
 640    }
 641
 642    trace_vfio_msix_enable(vdev->vbasedev.name);
 643}
 644
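     /*
      * Enable MSI: allocate an eventfd per vector the guest enabled, attempt
      * a KVM route for each, and program them all at once.  If the host can
      * only support fewer vectors, tear everything down and retry with the
      * number it reported.
      */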
 645static void vfio_msi_enable(VFIOPCIDevice *vdev)
 646{
 647    int ret, i;
 648
 649    vfio_disable_interrupts(vdev);
 650
 651    vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev);
 652retry:
 653    vdev->msi_vectors = g_new0(VFIOMSIVector, vdev->nr_vectors);
 654
 655    for (i = 0; i < vdev->nr_vectors; i++) {
 656        VFIOMSIVector *vector = &vdev->msi_vectors[i];
 657
 658        vector->vdev = vdev;
 659        vector->virq = -1;
 660        vector->use = true;
 661
 662        if (event_notifier_init(&vector->interrupt, 0)) {
 663            error_report("vfio: Error: event_notifier_init failed");
 664        }
 665
 666        qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
 667                            vfio_msi_interrupt, NULL, vector);
 668
 669        /*
 670         * Attempt to enable route through KVM irqchip,
 671         * default to userspace handling if unavailable.
 672         */
 673        vfio_add_kvm_msi_virq(vdev, vector, i, false);
 674    }
 675
 676    /* Set interrupt type prior to possible interrupts */
 677    vdev->interrupt = VFIO_INT_MSI;
 678
 679    ret = vfio_enable_vectors(vdev, false);
 680    if (ret) {
 681        if (ret < 0) {
 682            error_report("vfio: Error: Failed to setup MSI fds: %m");
 683        } else if (ret != vdev->nr_vectors) {
 684            error_report("vfio: Error: Failed to enable %d "
 685                         "MSI vectors, retry with %d", vdev->nr_vectors, ret);
 686        }
 687
 688        for (i = 0; i < vdev->nr_vectors; i++) {
 689            VFIOMSIVector *vector = &vdev->msi_vectors[i];
 690            if (vector->virq >= 0) {
 691                vfio_remove_kvm_msi_virq(vector);
 692            }
 693            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
 694                                NULL, NULL, NULL);
 695            event_notifier_cleanup(&vector->interrupt);
 696        }
 697
 698        g_free(vdev->msi_vectors);
 699
 700        if (ret > 0 && ret != vdev->nr_vectors) {
 701            vdev->nr_vectors = ret;
 702            goto retry;
 703        }
 704        vdev->nr_vectors = 0;
 705
 706        /*
  707         * Failing to set up MSI doesn't really fall within any specification.
  708         * Let's try leaving interrupts disabled and hope the guest figures
  709         * out how to fall back to INTx for this device.
 710         */
 711        error_report("vfio: Error: Failed to enable MSI");
 712        vdev->interrupt = VFIO_INT_NONE;
 713
 714        return;
 715    }
 716
 717    trace_vfio_msi_enable(vdev->vbasedev.name, vdev->nr_vectors);
 718}
 719
 720static void vfio_msi_disable_common(VFIOPCIDevice *vdev)
 721{
 722    Error *err = NULL;
 723    int i;
 724
 725    for (i = 0; i < vdev->nr_vectors; i++) {
 726        VFIOMSIVector *vector = &vdev->msi_vectors[i];
 727        if (vdev->msi_vectors[i].use) {
 728            if (vector->virq >= 0) {
 729                vfio_remove_kvm_msi_virq(vector);
 730            }
 731            qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
 732                                NULL, NULL, NULL);
 733            event_notifier_cleanup(&vector->interrupt);
 734        }
 735    }
 736
 737    g_free(vdev->msi_vectors);
 738    vdev->msi_vectors = NULL;
 739    vdev->nr_vectors = 0;
 740    vdev->interrupt = VFIO_INT_NONE;
 741
 742    vfio_intx_enable(vdev, &err);
 743    if (err) {
 744        error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
 745    }
 746}
 747
 748static void vfio_msix_disable(VFIOPCIDevice *vdev)
 749{
 750    int i;
 751
 752    msix_unset_vector_notifiers(&vdev->pdev);
 753
 754    /*
 755     * MSI-X will only release vectors if MSI-X is still enabled on the
  756     * device; check through the rest and release them ourselves if necessary.
 757     */
 758    for (i = 0; i < vdev->nr_vectors; i++) {
 759        if (vdev->msi_vectors[i].use) {
 760            vfio_msix_vector_release(&vdev->pdev, i);
 761            msix_vector_unuse(&vdev->pdev, i);
 762        }
 763    }
 764
 765    if (vdev->nr_vectors) {
 766        vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSIX_IRQ_INDEX);
 767    }
 768
 769    vfio_msi_disable_common(vdev);
 770
 771    memset(vdev->msix->pending, 0,
 772           BITS_TO_LONGS(vdev->msix->entries) * sizeof(unsigned long));
 773
 774    trace_vfio_msix_disable(vdev->vbasedev.name);
 775}
 776
 777static void vfio_msi_disable(VFIOPCIDevice *vdev)
 778{
 779    vfio_disable_irqindex(&vdev->vbasedev, VFIO_PCI_MSI_IRQ_INDEX);
 780    vfio_msi_disable_common(vdev);
 781
 782    trace_vfio_msi_disable(vdev->vbasedev.name);
 783}
 784
 785static void vfio_update_msi(VFIOPCIDevice *vdev)
 786{
 787    int i;
 788
 789    for (i = 0; i < vdev->nr_vectors; i++) {
 790        VFIOMSIVector *vector = &vdev->msi_vectors[i];
 791        MSIMessage msg;
 792
 793        if (!vector->use || vector->virq < 0) {
 794            continue;
 795        }
 796
 797        msg = msi_get_message(&vdev->pdev, i);
 798        vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev);
 799    }
 800}
 801
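     /*
      * Read the device option ROM through the VFIO ROM region into a local
      * buffer; guest ROM reads are then serviced from this copy.  If the ROM
      * advertises our vendor ID but a different device ID, patch in the
      * device ID we expose and fix up the checksum.
      */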
 802static void vfio_pci_load_rom(VFIOPCIDevice *vdev)
 803{
 804    struct vfio_region_info *reg_info;
 805    uint64_t size;
 806    off_t off = 0;
 807    ssize_t bytes;
 808
 809    if (vfio_get_region_info(&vdev->vbasedev,
 810                             VFIO_PCI_ROM_REGION_INDEX, &reg_info)) {
 811        error_report("vfio: Error getting ROM info: %m");
 812        return;
 813    }
 814
 815    trace_vfio_pci_load_rom(vdev->vbasedev.name, (unsigned long)reg_info->size,
 816                            (unsigned long)reg_info->offset,
 817                            (unsigned long)reg_info->flags);
 818
 819    vdev->rom_size = size = reg_info->size;
 820    vdev->rom_offset = reg_info->offset;
 821
 822    g_free(reg_info);
 823
 824    if (!vdev->rom_size) {
 825        vdev->rom_read_failed = true;
 826        error_report("vfio-pci: Cannot read device rom at "
 827                    "%s", vdev->vbasedev.name);
 828        error_printf("Device option ROM contents are probably invalid "
 829                    "(check dmesg).\nSkip option ROM probe with rombar=0, "
 830                    "or load from file with romfile=\n");
 831        return;
 832    }
 833
 834    vdev->rom = g_malloc(size);
 835    memset(vdev->rom, 0xff, size);
 836
 837    while (size) {
 838        bytes = pread(vdev->vbasedev.fd, vdev->rom + off,
 839                      size, vdev->rom_offset + off);
 840        if (bytes == 0) {
 841            break;
 842        } else if (bytes > 0) {
 843            off += bytes;
 844            size -= bytes;
 845        } else {
 846            if (errno == EINTR || errno == EAGAIN) {
 847                continue;
 848            }
 849            error_report("vfio: Error reading device ROM: %m");
 850            break;
 851        }
 852    }
 853
 854    /*
  855     * Test the ROM signature against our device; if the vendor is correct
 856     * but the device ID doesn't match, store the correct device ID and
 857     * recompute the checksum.  Intel IGD devices need this and are known
 858     * to have bogus checksums so we can't simply adjust the checksum.
 859     */
 860    if (pci_get_word(vdev->rom) == 0xaa55 &&
 861        pci_get_word(vdev->rom + 0x18) + 8 < vdev->rom_size &&
 862        !memcmp(vdev->rom + pci_get_word(vdev->rom + 0x18), "PCIR", 4)) {
 863        uint16_t vid, did;
 864
 865        vid = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 4);
 866        did = pci_get_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6);
 867
 868        if (vid == vdev->vendor_id && did != vdev->device_id) {
 869            int i;
 870            uint8_t csum, *data = vdev->rom;
 871
 872            pci_set_word(vdev->rom + pci_get_word(vdev->rom + 0x18) + 6,
 873                         vdev->device_id);
 874            data[6] = 0;
 875
 876            for (csum = 0, i = 0; i < vdev->rom_size; i++) {
 877                csum += data[i];
 878            }
 879
 880            data[6] = -csum;
 881        }
 882    }
 883}
 884
 885static uint64_t vfio_rom_read(void *opaque, hwaddr addr, unsigned size)
 886{
 887    VFIOPCIDevice *vdev = opaque;
 888    union {
 889        uint8_t byte;
 890        uint16_t word;
 891        uint32_t dword;
 892        uint64_t qword;
 893    } val;
 894    uint64_t data = 0;
 895
 896    /* Load the ROM lazily when the guest tries to read it */
 897    if (unlikely(!vdev->rom && !vdev->rom_read_failed)) {
 898        vfio_pci_load_rom(vdev);
 899    }
 900
 901    memcpy(&val, vdev->rom + addr,
 902           (addr < vdev->rom_size) ? MIN(size, vdev->rom_size - addr) : 0);
 903
 904    switch (size) {
 905    case 1:
 906        data = val.byte;
 907        break;
 908    case 2:
 909        data = le16_to_cpu(val.word);
 910        break;
 911    case 4:
 912        data = le32_to_cpu(val.dword);
 913        break;
 914    default:
 915        hw_error("vfio: unsupported read size, %d bytes\n", size);
 916        break;
 917    }
 918
 919    trace_vfio_rom_read(vdev->vbasedev.name, addr, size, data);
 920
 921    return data;
 922}
 923
 924static void vfio_rom_write(void *opaque, hwaddr addr,
 925                           uint64_t data, unsigned size)
 926{
 927}
 928
 929static const MemoryRegionOps vfio_rom_ops = {
 930    .read = vfio_rom_read,
 931    .write = vfio_rom_write,
 932    .endianness = DEVICE_LITTLE_ENDIAN,
 933};
 934
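     /*
      * Size the physical ROM BAR the same way firmware would: save the
      * register, write the all-ones size mask, read back the encoded size,
      * and restore the original value.  An emulated ROM BAR of that size is
      * then registered; its contents are filled in lazily by vfio_rom_read().
      */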
 935static void vfio_pci_size_rom(VFIOPCIDevice *vdev)
 936{
 937    uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK);
 938    off_t offset = vdev->config_offset + PCI_ROM_ADDRESS;
 939    DeviceState *dev = DEVICE(vdev);
 940    char *name;
 941    int fd = vdev->vbasedev.fd;
 942
 943    if (vdev->pdev.romfile || !vdev->pdev.rom_bar) {
 944        /* Since pci handles romfile, just print a message and return */
 945        if (vfio_blacklist_opt_rom(vdev) && vdev->pdev.romfile) {
 946            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified romfile\n",
 947                         vdev->vbasedev.name);
 948        }
 949        return;
 950    }
 951
 952    /*
 953     * Use the same size ROM BAR as the physical device.  The contents
 954     * will get filled in later when the guest tries to read it.
 955     */
 956    if (pread(fd, &orig, 4, offset) != 4 ||
 957        pwrite(fd, &size, 4, offset) != 4 ||
 958        pread(fd, &size, 4, offset) != 4 ||
 959        pwrite(fd, &orig, 4, offset) != 4) {
 960        error_report("%s(%s) failed: %m", __func__, vdev->vbasedev.name);
 961        return;
 962    }
 963
 964    size = ~(le32_to_cpu(size) & PCI_ROM_ADDRESS_MASK) + 1;
 965
 966    if (!size) {
 967        return;
 968    }
 969
 970    if (vfio_blacklist_opt_rom(vdev)) {
 971        if (dev->opts && qemu_opt_get(dev->opts, "rombar")) {
 972            error_printf("Warning : Device at %s is known to cause system instability issues during option rom execution. Proceeding anyway since user specified non zero value for rombar\n",
 973                         vdev->vbasedev.name);
 974        } else {
 975            error_printf("Warning : Rom loading for device at %s has been disabled due to system instability issues. Specify rombar=1 or romfile to force\n",
 976                         vdev->vbasedev.name);
 977            return;
 978        }
 979    }
 980
 981    trace_vfio_pci_size_rom(vdev->vbasedev.name, size);
 982
 983    name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name);
 984
 985    memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev),
 986                          &vfio_rom_ops, vdev, name, size);
 987    g_free(name);
 988
 989    pci_register_bar(&vdev->pdev, PCI_ROM_SLOT,
 990                     PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom);
 991
 992    vdev->pdev.has_rom = true;
 993    vdev->rom_read_failed = false;
 994}
 995
 996void vfio_vga_write(void *opaque, hwaddr addr,
 997                           uint64_t data, unsigned size)
 998{
 999    VFIOVGARegion *region = opaque;
1000    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1001    union {
1002        uint8_t byte;
1003        uint16_t word;
1004        uint32_t dword;
1005        uint64_t qword;
1006    } buf;
1007    off_t offset = vga->fd_offset + region->offset + addr;
1008
1009    switch (size) {
1010    case 1:
1011        buf.byte = data;
1012        break;
1013    case 2:
1014        buf.word = cpu_to_le16(data);
1015        break;
1016    case 4:
1017        buf.dword = cpu_to_le32(data);
1018        break;
1019    default:
1020        hw_error("vfio: unsupported write size, %d bytes", size);
1021        break;
1022    }
1023
1024    if (pwrite(vga->fd, &buf, size, offset) != size) {
1025        error_report("%s(,0x%"HWADDR_PRIx", 0x%"PRIx64", %d) failed: %m",
1026                     __func__, region->offset + addr, data, size);
1027    }
1028
1029    trace_vfio_vga_write(region->offset + addr, data, size);
1030}
1031
1032uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size)
1033{
1034    VFIOVGARegion *region = opaque;
1035    VFIOVGA *vga = container_of(region, VFIOVGA, region[region->nr]);
1036    union {
1037        uint8_t byte;
1038        uint16_t word;
1039        uint32_t dword;
1040        uint64_t qword;
1041    } buf;
1042    uint64_t data = 0;
1043    off_t offset = vga->fd_offset + region->offset + addr;
1044
1045    if (pread(vga->fd, &buf, size, offset) != size) {
1046        error_report("%s(,0x%"HWADDR_PRIx", %d) failed: %m",
1047                     __func__, region->offset + addr, size);
1048        return (uint64_t)-1;
1049    }
1050
1051    switch (size) {
1052    case 1:
1053        data = buf.byte;
1054        break;
1055    case 2:
1056        data = le16_to_cpu(buf.word);
1057        break;
1058    case 4:
1059        data = le32_to_cpu(buf.dword);
1060        break;
1061    default:
1062        hw_error("vfio: unsupported read size, %d bytes", size);
1063        break;
1064    }
1065
1066    trace_vfio_vga_read(region->offset + addr, size, data);
1067
1068    return data;
1069}
1070
1071static const MemoryRegionOps vfio_vga_ops = {
1072    .read = vfio_vga_read,
1073    .write = vfio_vga_write,
1074    .endianness = DEVICE_LITTLE_ENDIAN,
1075};
1076
1077/*
 1078 * Expand the memory region of a sub-page (size < PAGE_SIZE) MMIO BAR to
 1079 * the host page size when the BAR occupies an exclusive page on the host,
 1080 * so that the BAR can be mmap'd into the guest.  The sub-page BAR may not
 1081 * occupy an exclusive page in the guest, however, so the expanded region
 1082 * is added with priority zero in case it overlaps other BARs that share
 1083 * the same guest page.  We also restore the original size of the sub-page
 1084 * BAR when the guest moves it to a base address that is no longer page
 1085 * aligned.
1086 */
1087static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
1088{
1089    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1090    VFIORegion *region = &vdev->bars[bar].region;
1091    MemoryRegion *mmap_mr, *region_mr, *base_mr;
1092    PCIIORegion *r;
1093    pcibus_t bar_addr;
1094    uint64_t size = region->size;
1095
1096    /* Make sure that the whole region is allowed to be mmapped */
1097    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
1098        region->mmaps[0].size != region->size) {
1099        return;
1100    }
1101
1102    r = &pdev->io_regions[bar];
1103    bar_addr = r->addr;
1104    base_mr = vdev->bars[bar].mr;
1105    region_mr = region->mem;
1106    mmap_mr = &region->mmaps[0].mem;
1107
1108    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
1109    if (bar_addr != PCI_BAR_UNMAPPED &&
1110        !(bar_addr & ~qemu_real_host_page_mask)) {
1111        size = qemu_real_host_page_size;
1112    }
1113
1114    memory_region_transaction_begin();
1115
1116    if (vdev->bars[bar].size < size) {
1117        memory_region_set_size(base_mr, size);
1118    }
1119    memory_region_set_size(region_mr, size);
1120    memory_region_set_size(mmap_mr, size);
1121    if (size != vdev->bars[bar].size && memory_region_is_mapped(base_mr)) {
1122        memory_region_del_subregion(r->address_space, base_mr);
1123        memory_region_add_subregion_overlap(r->address_space,
1124                                            bar_addr, base_mr, 0);
1125    }
1126
1127    memory_region_transaction_commit();
1128}
1129
1130/*
1131 * PCI config space
1132 */
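     /*
      * Config space reads merge emulated bits (tracked by QEMU) with bits
      * read from the physical device; emulated_config_bits selects which
      * source wins for each bit.
      */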
1133uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
1134{
1135    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1136    uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val;
1137
1138    memcpy(&emu_bits, vdev->emulated_config_bits + addr, len);
1139    emu_bits = le32_to_cpu(emu_bits);
1140
1141    if (emu_bits) {
1142        emu_val = pci_default_read_config(pdev, addr, len);
1143    }
1144
1145    if (~emu_bits & (0xffffffffU >> (32 - len * 8))) {
1146        ssize_t ret;
1147
1148        ret = pread(vdev->vbasedev.fd, &phys_val, len,
1149                    vdev->config_offset + addr);
1150        if (ret != len) {
1151            error_report("%s(%s, 0x%x, 0x%x) failed: %m",
1152                         __func__, vdev->vbasedev.name, addr, len);
1153            return -errno;
1154        }
1155        phys_val = le32_to_cpu(phys_val);
1156    }
1157
1158    val = (emu_val & emu_bits) | (phys_val & ~emu_bits);
1159
1160    trace_vfio_pci_read_config(vdev->vbasedev.name, addr, len, val);
1161
1162    return val;
1163}
1164
1165void vfio_pci_write_config(PCIDevice *pdev,
1166                           uint32_t addr, uint32_t val, int len)
1167{
1168    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
1169    uint32_t val_le = cpu_to_le32(val);
1170
1171    trace_vfio_pci_write_config(vdev->vbasedev.name, addr, val, len);
1172
1173    /* Write everything to VFIO, let it filter out what we can't write */
1174    if (pwrite(vdev->vbasedev.fd, &val_le, len, vdev->config_offset + addr)
1175                != len) {
1176        error_report("%s(%s, 0x%x, 0x%x, 0x%x) failed: %m",
1177                     __func__, vdev->vbasedev.name, addr, val, len);
1178    }
1179
1180    /* MSI/MSI-X Enabling/Disabling */
1181    if (pdev->cap_present & QEMU_PCI_CAP_MSI &&
1182        ranges_overlap(addr, len, pdev->msi_cap, vdev->msi_cap_size)) {
1183        int is_enabled, was_enabled = msi_enabled(pdev);
1184
1185        pci_default_write_config(pdev, addr, val, len);
1186
1187        is_enabled = msi_enabled(pdev);
1188
1189        if (!was_enabled) {
1190            if (is_enabled) {
1191                vfio_msi_enable(vdev);
1192            }
1193        } else {
1194            if (!is_enabled) {
1195                vfio_msi_disable(vdev);
1196            } else {
1197                vfio_update_msi(vdev);
1198            }
1199        }
1200    } else if (pdev->cap_present & QEMU_PCI_CAP_MSIX &&
1201        ranges_overlap(addr, len, pdev->msix_cap, MSIX_CAP_LENGTH)) {
1202        int is_enabled, was_enabled = msix_enabled(pdev);
1203
1204        pci_default_write_config(pdev, addr, val, len);
1205
1206        is_enabled = msix_enabled(pdev);
1207
1208        if (!was_enabled && is_enabled) {
1209            vfio_msix_enable(vdev);
1210        } else if (was_enabled && !is_enabled) {
1211            vfio_msix_disable(vdev);
1212        }
1213    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
1214        range_covers_byte(addr, len, PCI_COMMAND)) {
1215        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
1216        int bar;
1217
1218        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1219            old_addr[bar] = pdev->io_regions[bar].addr;
1220        }
1221
1222        pci_default_write_config(pdev, addr, val, len);
1223
1224        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
1225            if (old_addr[bar] != pdev->io_regions[bar].addr &&
1226                vdev->bars[bar].region.size > 0 &&
1227                vdev->bars[bar].region.size < qemu_real_host_page_size) {
1228                vfio_sub_page_bar_update_mapping(pdev, bar);
1229            }
1230        }
1231    } else {
1232        /* Write everything to QEMU to keep emulated bits correct */
1233        pci_default_write_config(pdev, addr, val, len);
1234    }
1235}
1236
1237/*
1238 * Interrupt setup
1239 */
1240static void vfio_disable_interrupts(VFIOPCIDevice *vdev)
1241{
1242    /*
1243     * More complicated than it looks.  Disabling MSI/X transitions the
1244     * device to INTx mode (if supported).  Therefore we need to first
 1245     * disable MSI/X and then clean up by disabling INTx.
1246     */
1247    if (vdev->interrupt == VFIO_INT_MSIX) {
1248        vfio_msix_disable(vdev);
1249    } else if (vdev->interrupt == VFIO_INT_MSI) {
1250        vfio_msi_disable(vdev);
1251    }
1252
1253    if (vdev->interrupt == VFIO_INT_INTx) {
1254        vfio_intx_disable(vdev);
1255    }
1256}
1257
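     /*
      * Probe the physical MSI capability and register an emulated MSI
      * capability with matching properties (vector count, 64-bit address,
      * per-vector masking).
      */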
1258static int vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1259{
1260    uint16_t ctrl;
1261    bool msi_64bit, msi_maskbit;
1262    int ret, entries;
1263    Error *err = NULL;
1264
1265    if (pread(vdev->vbasedev.fd, &ctrl, sizeof(ctrl),
1266              vdev->config_offset + pos + PCI_CAP_FLAGS) != sizeof(ctrl)) {
1267        error_setg_errno(errp, errno, "failed reading MSI PCI_CAP_FLAGS");
1268        return -errno;
1269    }
1270    ctrl = le16_to_cpu(ctrl);
1271
1272    msi_64bit = !!(ctrl & PCI_MSI_FLAGS_64BIT);
1273    msi_maskbit = !!(ctrl & PCI_MSI_FLAGS_MASKBIT);
1274    entries = 1 << ((ctrl & PCI_MSI_FLAGS_QMASK) >> 1);
1275
1276    trace_vfio_msi_setup(vdev->vbasedev.name, pos);
1277
1278    ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err);
1279    if (ret < 0) {
1280        if (ret == -ENOTSUP) {
1281            return 0;
1282        }
1283        error_prepend(&err, "msi_init failed: ");
1284        error_propagate(errp, err);
1285        return ret;
1286    }
1287    vdev->msi_cap_size = 0xa + (msi_maskbit ? 0xa : 0) + (msi_64bit ? 0x4 : 0);
1288
1289    return 0;
1290}
1291
1292static void vfio_pci_fixup_msix_region(VFIOPCIDevice *vdev)
1293{
1294    off_t start, end;
1295    VFIORegion *region = &vdev->bars[vdev->msix->table_bar].region;
1296
1297    /*
 1298     * If the host driver allows the MSI-X data to be mapped, we map the
 1299     * entire BAR and emulate the MSI-X table on top of that mapping.
1300     */
1301    if (vfio_has_region_cap(&vdev->vbasedev, region->nr,
1302                            VFIO_REGION_INFO_CAP_MSIX_MAPPABLE)) {
1303        return;
1304    }
1305
1306    /*
 1307     * We expect to find a single mmap covering the whole BAR; anything else
 1308     * means it's either unsupported or already set up.
1309     */
1310    if (region->nr_mmaps != 1 || region->mmaps[0].offset ||
1311        region->size != region->mmaps[0].size) {
1312        return;
1313    }
1314
1315    /* MSI-X table start and end aligned to host page size */
1316    start = vdev->msix->table_offset & qemu_real_host_page_mask;
1317    end = REAL_HOST_PAGE_ALIGN((uint64_t)vdev->msix->table_offset +
1318                               (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE));
1319
1320    /*
1321     * Does the MSI-X table cover the beginning of the BAR?  The whole BAR?
1322     * NB - Host page size is necessarily a power of two and so is the PCI
1323     * BAR (not counting EA yet), therefore if we have host page aligned
1324     * @start and @end, then any remainder of the BAR before or after those
1325     * must be at least host page sized and therefore mmap'able.
1326     */
1327    if (!start) {
1328        if (end >= region->size) {
1329            region->nr_mmaps = 0;
1330            g_free(region->mmaps);
1331            region->mmaps = NULL;
1332            trace_vfio_msix_fixup(vdev->vbasedev.name,
1333                                  vdev->msix->table_bar, 0, 0);
1334        } else {
1335            region->mmaps[0].offset = end;
1336            region->mmaps[0].size = region->size - end;
1337            trace_vfio_msix_fixup(vdev->vbasedev.name,
1338                              vdev->msix->table_bar, region->mmaps[0].offset,
1339                              region->mmaps[0].offset + region->mmaps[0].size);
1340        }
1341
1342    /* Maybe it's aligned at the end of the BAR */
1343    } else if (end >= region->size) {
1344        region->mmaps[0].size = start;
1345        trace_vfio_msix_fixup(vdev->vbasedev.name,
1346                              vdev->msix->table_bar, region->mmaps[0].offset,
1347                              region->mmaps[0].offset + region->mmaps[0].size);
1348
1349    /* Otherwise it must split the BAR */
1350    } else {
1351        region->nr_mmaps = 2;
1352        region->mmaps = g_renew(VFIOMmap, region->mmaps, 2);
1353
1354        memcpy(&region->mmaps[1], &region->mmaps[0], sizeof(VFIOMmap));
1355
1356        region->mmaps[0].size = start;
1357        trace_vfio_msix_fixup(vdev->vbasedev.name,
1358                              vdev->msix->table_bar, region->mmaps[0].offset,
1359                              region->mmaps[0].offset + region->mmaps[0].size);
1360
1361        region->mmaps[1].offset = end;
1362        region->mmaps[1].size = region->size - end;
1363        trace_vfio_msix_fixup(vdev->vbasedev.name,
1364                              vdev->msix->table_bar, region->mmaps[1].offset,
1365                              region->mmaps[1].offset + region->mmaps[1].size);
1366    }
1367}
1368
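     /*
      * Optionally relocate the MSI-X table and PBA into another (possibly
      * new) BAR, as selected by the msix_relo option, so that the BAR that
      * originally contained them can still be mmap'd in full.
      */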
1369static void vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp)
1370{
1371    int target_bar = -1;
1372    size_t msix_sz;
1373
1374    if (!vdev->msix || vdev->msix_relo == OFF_AUTOPCIBAR_OFF) {
1375        return;
1376    }
1377
1378    /* The actual minimum size of MSI-X structures */
1379    msix_sz = (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE) +
1380              (QEMU_ALIGN_UP(vdev->msix->entries, 64) / 8);
1381    /* Round up to host pages, we don't want to share a page */
1382    msix_sz = REAL_HOST_PAGE_ALIGN(msix_sz);
1383    /* PCI BARs must be a power of 2 */
1384    msix_sz = pow2ceil(msix_sz);
1385
1386    if (vdev->msix_relo == OFF_AUTOPCIBAR_AUTO) {
1387        /*
1388         * TODO: Lookup table for known devices.
1389         *
1390         * Logically we might use an algorithm here to select the BAR adding
 1391     * the least additional MMIO space, but we cannot programmatically
1392         * predict the driver dependency on BAR ordering or sizing, therefore
1393         * 'auto' becomes a lookup for combinations reported to work.
1394         */
1395        if (target_bar < 0) {
1396            error_setg(errp, "No automatic MSI-X relocation available for "
1397                       "device %04x:%04x", vdev->vendor_id, vdev->device_id);
1398            return;
1399        }
1400    } else {
1401        target_bar = (int)(vdev->msix_relo - OFF_AUTOPCIBAR_BAR0);
1402    }
1403
1404    /* I/O port BARs cannot host MSI-X structures */
1405    if (vdev->bars[target_bar].ioport) {
1406        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1407                   "I/O port BAR", target_bar);
1408        return;
1409    }
1410
1411    /* Cannot use a BAR in the "shadow" of a 64-bit BAR */
1412    if (!vdev->bars[target_bar].size &&
1413         target_bar > 0 && vdev->bars[target_bar - 1].mem64) {
1414        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1415                   "consumed by 64-bit BAR %d", target_bar, target_bar - 1);
1416        return;
1417    }
1418
1419    /* 2GB max size for 32-bit BARs, cannot double if already > 1G */
1420    if (vdev->bars[target_bar].size > (1 * 1024 * 1024 * 1024) &&
1421        !vdev->bars[target_bar].mem64) {
1422        error_setg(errp, "Invalid MSI-X relocation BAR %d, "
1423                   "no space to extend 32-bit BAR", target_bar);
1424        return;
1425    }
1426
1427    /*
1428     * If adding a new BAR, test if we can make it 64bit.  We make it
1429     * prefetchable since QEMU MSI-X emulation has no read side effects
1430     * and doing so makes mapping more flexible.
1431     */
1432    if (!vdev->bars[target_bar].size) {
1433        if (target_bar < (PCI_ROM_SLOT - 1) &&
1434            !vdev->bars[target_bar + 1].size) {
1435            vdev->bars[target_bar].mem64 = true;
1436            vdev->bars[target_bar].type = PCI_BASE_ADDRESS_MEM_TYPE_64;
1437        }
1438        vdev->bars[target_bar].type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
1439        vdev->bars[target_bar].size = msix_sz;
1440        vdev->msix->table_offset = 0;
1441    } else {
1442        vdev->bars[target_bar].size = MAX(vdev->bars[target_bar].size * 2,
1443                                          msix_sz * 2);
1444        /*
1445         * Due to above size calc, MSI-X always starts halfway into the BAR,
1446         * which will always be a separate host page.
1447         */
1448        vdev->msix->table_offset = vdev->bars[target_bar].size / 2;
1449    }
1450
1451    vdev->msix->table_bar = target_bar;
1452    vdev->msix->pba_bar = target_bar;
1453    /* Requires 8-byte alignment, but PCI_MSIX_ENTRY_SIZE guarantees that */
1454    vdev->msix->pba_offset = vdev->msix->table_offset +
1455                                  (vdev->msix->entries * PCI_MSIX_ENTRY_SIZE);
1456
1457    trace_vfio_msix_relo(vdev->vbasedev.name,
1458                         vdev->msix->table_bar, vdev->msix->table_offset);
1459}
1460
1461/*
1462 * We don't have any control over how pci_add_capability() inserts
1463 * capabilities into the chain.  In order to setup MSI-X we need a
1464 * MemoryRegion for the BAR.  In order to setup the BAR and not
1465 * attempt to mmap the MSI-X table area, which VFIO won't allow, we
1466 * need to first look for where the MSI-X table lives.  So we
1467 * unfortunately split MSI-X setup across two functions.
1468 */
1469static void vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp)
1470{
1471    uint8_t pos;
1472    uint16_t ctrl;
1473    uint32_t table, pba;
1474    int fd = vdev->vbasedev.fd;
1475    VFIOMSIXInfo *msix;
1476
1477    pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX);
1478    if (!pos) {
1479        return;
1480    }
1481
1482    if (pread(fd, &ctrl, sizeof(ctrl),
1483              vdev->config_offset + pos + PCI_MSIX_FLAGS) != sizeof(ctrl)) {
1484        error_setg_errno(errp, errno, "failed to read PCI MSIX FLAGS");
1485        return;
1486    }
1487
1488    if (pread(fd, &table, sizeof(table),
1489              vdev->config_offset + pos + PCI_MSIX_TABLE) != sizeof(table)) {
1490        error_setg_errno(errp, errno, "failed to read PCI MSIX TABLE");
1491        return;
1492    }
1493
1494    if (pread(fd, &pba, sizeof(pba),
1495              vdev->config_offset + pos + PCI_MSIX_PBA) != sizeof(pba)) {
1496        error_setg_errno(errp, errno, "failed to read PCI MSIX PBA");
1497        return;
1498    }
1499
1500    ctrl = le16_to_cpu(ctrl);
1501    table = le32_to_cpu(table);
1502    pba = le32_to_cpu(pba);
1503
1504    msix = g_malloc0(sizeof(*msix));
1505    msix->table_bar = table & PCI_MSIX_FLAGS_BIRMASK;
1506    msix->table_offset = table & ~PCI_MSIX_FLAGS_BIRMASK;
1507    msix->pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
1508    msix->pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
1509    msix->entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
1510
1511    /*
 1512     * Check whether the reported pba_offset extends beyond the specified
 1513     * BAR.  If it does, we either apply a hardware-specific quirk for a
 1514     * known device or we are dealing with a broken configuration.
1515     */
1516    if (msix->pba_offset >= vdev->bars[msix->pba_bar].region.size) {
1517        /*
1518         * Chelsio T5 Virtual Function devices are encoded as 0x58xx for T5
1519         * adapters. The T5 hardware returns an incorrect value of 0x8000 for
1520         * the VF PBA offset while the BAR itself is only 8k. The correct value
1521         * is 0x1000, so we hard code that here.
1522         */
1523        if (vdev->vendor_id == PCI_VENDOR_ID_CHELSIO &&
1524            (vdev->device_id & 0xff00) == 0x5800) {
1525            msix->pba_offset = 0x1000;
1526        } else {
1527            error_setg(errp, "hardware reports invalid configuration, "
1528                       "MSIX PBA outside of specified BAR");
1529            g_free(msix);
1530            return;
1531        }
1532    }
1533
1534    trace_vfio_msix_early_setup(vdev->vbasedev.name, pos, msix->table_bar,
1535                                msix->table_offset, msix->entries);
1536    vdev->msix = msix;
1537
1538    vfio_pci_fixup_msix_region(vdev);
1539
1540    vfio_pci_relocate_msix(vdev, errp);
1541}
1542
1543static int vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp)
1544{
1545    int ret;
1546    Error *err = NULL;
1547
1548    vdev->msix->pending = g_malloc0(BITS_TO_LONGS(vdev->msix->entries) *
1549                                    sizeof(unsigned long));
1550    ret = msix_init(&vdev->pdev, vdev->msix->entries,
1551                    vdev->bars[vdev->msix->table_bar].mr,
1552                    vdev->msix->table_bar, vdev->msix->table_offset,
1553                    vdev->bars[vdev->msix->pba_bar].mr,
1554                    vdev->msix->pba_bar, vdev->msix->pba_offset, pos,
1555                    &err);
1556    if (ret < 0) {
1557        if (ret == -ENOTSUP) {
1558            error_report_err(err);
1559            return 0;
1560        }
1561
1562        error_propagate(errp, err);
1563        return ret;
1564    }
1565
1566    /*
1567     * The PCI spec suggests that devices provide additional alignment for
1568     * MSI-X structures and avoid overlapping non-MSI-X related registers.
1569     * For an assigned device, this hopefully means that emulation of MSI-X
1570     * structures does not affect the performance of the device.  If devices
1571     * fail to provide that alignment, a significant performance penalty may
1572     * result, for instance Mellanox MT27500 VFs:
1573     * http://www.spinics.net/lists/kvm/msg125881.html
1574     *
1575     * The PBA is simply not important enough to justify such a serious
1576     * regression, and most drivers do not appear to look at it.  The solution
1577     * is to disable the PBA MemoryRegion unless it's being used.  We disable
1578     * it here and only enable it if a masked vector fires through QEMU.  When
1579     * the vector-use notifier is called, which occurs on unmask, we test
1580     * whether PBA emulation is needed and again disable it if not.
1581     */
1582    memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false);
1583
1584    /*
1585     * The emulated machine may provide a paravirt interface for MSIX setup,
1586     * so it is not strictly necessary to emulate MSIX here.  This becomes
1587     * helpful when frequently accessed MMIO registers are located in
1588     * subpages adjacent to the MSIX table, but the page containing the MSIX
1589     * data cannot be mapped because the host page size is bigger than the
1590     * MSIX table alignment.
1591     */
1592    if (object_property_get_bool(OBJECT(qdev_get_machine()),
1593                                 "vfio-no-msix-emulation", NULL)) {
1594        memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false);
1595    }
1596
1597    return 0;
1598}
1599
1600static void vfio_teardown_msi(VFIOPCIDevice *vdev)
1601{
1602    msi_uninit(&vdev->pdev);
1603
1604    if (vdev->msix) {
1605        msix_uninit(&vdev->pdev,
1606                    vdev->bars[vdev->msix->table_bar].mr,
1607                    vdev->bars[vdev->msix->pba_bar].mr);
1608        g_free(vdev->msix->pending);
1609    }
1610}
1611
1612/*
1613 * Resource setup
1614 */
1615static void vfio_mmap_set_enabled(VFIOPCIDevice *vdev, bool enabled)
1616{
1617    int i;
1618
1619    for (i = 0; i < PCI_ROM_SLOT; i++) {
1620        vfio_region_mmaps_set_enabled(&vdev->bars[i].region, enabled);
1621    }
1622}
1623
1624static void vfio_bar_prepare(VFIOPCIDevice *vdev, int nr)
1625{
1626    VFIOBAR *bar = &vdev->bars[nr];
1627
1628    uint32_t pci_bar;
1629    int ret;
1630
1631    /* Skip both unimplemented BARs and the upper half of 64-bit BARs. */
1632    if (!bar->region.size) {
1633        return;
1634    }
1635
1636    /* Determine what type of BAR this is for registration */
1637    ret = pread(vdev->vbasedev.fd, &pci_bar, sizeof(pci_bar),
1638                vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr));
1639    if (ret != sizeof(pci_bar)) {
1640        error_report("vfio: Failed to read BAR %d (%m)", nr);
1641        return;
1642    }
1643
1644    pci_bar = le32_to_cpu(pci_bar);
1645    bar->ioport = (pci_bar & PCI_BASE_ADDRESS_SPACE_IO);
1646    bar->mem64 = bar->ioport ? 0 : (pci_bar & PCI_BASE_ADDRESS_MEM_TYPE_64);
1647    bar->type = pci_bar & (bar->ioport ? ~PCI_BASE_ADDRESS_IO_MASK :
1648                                         ~PCI_BASE_ADDRESS_MEM_MASK);
1649    bar->size = bar->region.size;
1650}
1651
1652static void vfio_bars_prepare(VFIOPCIDevice *vdev)
1653{
1654    int i;
1655
1656    for (i = 0; i < PCI_ROM_SLOT; i++) {
1657        vfio_bar_prepare(vdev, i);
1658    }
1659}
1660
1661static void vfio_bar_register(VFIOPCIDevice *vdev, int nr)
1662{
1663    VFIOBAR *bar = &vdev->bars[nr];
1664    char *name;
1665
1666    if (!bar->size) {
1667        return;
1668    }
1669
1670    bar->mr = g_new0(MemoryRegion, 1);
1671    name = g_strdup_printf("%s base BAR %d", vdev->vbasedev.name, nr);
1672    memory_region_init_io(bar->mr, OBJECT(vdev), NULL, NULL, name, bar->size);
1673    g_free(name);
1674
1675    if (bar->region.size) {
1676        memory_region_add_subregion(bar->mr, 0, bar->region.mem);
1677
1678        if (vfio_region_mmap(&bar->region)) {
1679            error_report("Failed to mmap %s BAR %d. Performance may be slow",
1680                         vdev->vbasedev.name, nr);
1681        }
1682    }
1683
1684    pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr);
1685}
1686
1687static void vfio_bars_register(VFIOPCIDevice *vdev)
1688{
1689    int i;
1690
1691    for (i = 0; i < PCI_ROM_SLOT; i++) {
1692        vfio_bar_register(vdev, i);
1693    }
1694}
1695
1696static void vfio_bars_exit(VFIOPCIDevice *vdev)
1697{
1698    int i;
1699
1700    for (i = 0; i < PCI_ROM_SLOT; i++) {
1701        VFIOBAR *bar = &vdev->bars[i];
1702
1703        vfio_bar_quirk_exit(vdev, i);
1704        vfio_region_exit(&bar->region);
1705        if (bar->region.size) {
1706            memory_region_del_subregion(bar->mr, bar->region.mem);
1707        }
1708    }
1709
1710    if (vdev->vga) {
1711        pci_unregister_vga(&vdev->pdev);
1712        vfio_vga_quirk_exit(vdev);
1713    }
1714}
1715
1716static void vfio_bars_finalize(VFIOPCIDevice *vdev)
1717{
1718    int i;
1719
1720    for (i = 0; i < PCI_ROM_SLOT; i++) {
1721        VFIOBAR *bar = &vdev->bars[i];
1722
1723        vfio_bar_quirk_finalize(vdev, i);
1724        vfio_region_finalize(&bar->region);
1725        if (bar->size) {
1726            object_unparent(OBJECT(bar->mr));
1727            g_free(bar->mr);
1728        }
1729    }
1730
1731    if (vdev->vga) {
1732        vfio_vga_quirk_finalize(vdev);
1733        for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) {
1734            object_unparent(OBJECT(&vdev->vga->region[i].mem));
1735        }
1736        g_free(vdev->vga);
1737    }
1738}
1739
1740/*
1741 * General setup
1742 */
1743static uint8_t vfio_std_cap_max_size(PCIDevice *pdev, uint8_t pos)
1744{
1745    uint8_t tmp;
1746    uint16_t next = PCI_CONFIG_SPACE_SIZE;
1747
1748    for (tmp = pdev->config[PCI_CAPABILITY_LIST]; tmp;
1749         tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]) {
1750        if (tmp > pos && tmp < next) {
1751            next = tmp;
1752        }
1753    }
1754
1755    return next - pos;
1756}
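
/*
 * Illustrative sketch only (not called by this driver): what the routine
 * above computes, re-expressed over a plain array of capability offsets.
 * Assuming standard capabilities at offsets 0x40, 0x60, and 0x50, the
 * capability at 0x50 is bounded by the next higher offset (0x60), so it may
 * occupy at most 0x10 bytes, while the one at 0x60 may run to the end of
 * conventional config space (0x100 - 0x60 bytes).
 */
static inline uint8_t vfio_cap_size_example(const uint8_t *offsets, int count,
                                            uint8_t pos)
{
    uint16_t next = PCI_CONFIG_SPACE_SIZE;
    int i;

    for (i = 0; i < count; i++) {
        if (offsets[i] > pos && offsets[i] < next) {
            next = offsets[i];
        }
    }
    return next - pos;
}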
1757
1758
1759static uint16_t vfio_ext_cap_max_size(const uint8_t *config, uint16_t pos)
1760{
1761    uint16_t tmp, next = PCIE_CONFIG_SPACE_SIZE;
1762
1763    for (tmp = PCI_CONFIG_SPACE_SIZE; tmp;
1764        tmp = PCI_EXT_CAP_NEXT(pci_get_long(config + tmp))) {
1765        if (tmp > pos && tmp < next) {
1766            next = tmp;
1767        }
1768    }
1769
1770    return next - pos;
1771}
1772
1773static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask)
1774{
1775    pci_set_word(buf, (pci_get_word(buf) & ~mask) | val);
1776}
1777
1778static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos,
1779                                   uint16_t val, uint16_t mask)
1780{
1781    vfio_set_word_bits(vdev->pdev.config + pos, val, mask);
1782    vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask);
1783    vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask);
1784}
1785
1786static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask)
1787{
1788    pci_set_long(buf, (pci_get_long(buf) & ~mask) | val);
1789}
1790
1791static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos,
1792                                   uint32_t val, uint32_t mask)
1793{
1794    vfio_set_long_bits(vdev->pdev.config + pos, val, mask);
1795    vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask);
1796    vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask);
1797}
1798
1799static int vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size,
1800                               Error **errp)
1801{
1802    uint16_t flags;
1803    uint8_t type;
1804
1805    flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS);
1806    type = (flags & PCI_EXP_FLAGS_TYPE) >> 4;
1807
1808    if (type != PCI_EXP_TYPE_ENDPOINT &&
1809        type != PCI_EXP_TYPE_LEG_END &&
1810        type != PCI_EXP_TYPE_RC_END) {
1811
1812        error_setg(errp, "assignment of PCIe type 0x%x "
1813                   "devices is not currently supported", type);
1814        return -EINVAL;
1815    }
1816
1817    if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) {
1818        PCIBus *bus = pci_get_bus(&vdev->pdev);
1819        PCIDevice *bridge;
1820
1821        /*
1822         * Traditionally, PCI device assignment exposes the PCIe capability
1823         * as-is on non-express buses.  The reason is that some drivers simply
1824         * assume that it's there, for example tg3.  However, when we're
1825         * running on a native PCIe machine type, like Q35, we need to hide
1826         * the PCIe capability.  The reason for this is twofold: first,
1827         * Windows guests get a Code 10 error when the PCIe capability is
1828         * exposed in this configuration, so express devices won't work at
1829         * all unless they're attached to express buses in the VM.  Second,
1830         * a native PCIe machine introduces the possibility of fine-grained
1831         * IOMMUs supporting both translation and isolation.  Guest code
1832         * that discovers the IOMMU visibility of a device, such as the
1833         * IOMMU grouping code on Linux, is very aware of device types and
1834         * valid transitions between bus types.  An express device on a
1835         * non-express bus is not a valid combination on bare metal systems.
1836         *
1837         * Drivers that require a PCIe capability to make the device
1838         * functional are simply going to need to have their devices placed
1839         * on a PCIe bus in the VM.
1840         */
1841        while (!pci_bus_is_root(bus)) {
1842            bridge = pci_bridge_get_device(bus);
1843            bus = pci_get_bus(bridge);
1844        }
1845
1846        if (pci_bus_is_express(bus)) {
1847            return 0;
1848        }
1849
1850    } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) {
1851        /*
1852         * On a Root Complex bus Endpoints become Root Complex Integrated
1853         * Endpoints, which changes the type and clears the LNK & LNK2 fields.
1854         */
1855        if (type == PCI_EXP_TYPE_ENDPOINT) {
1856            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1857                                   PCI_EXP_TYPE_RC_END << 4,
1858                                   PCI_EXP_FLAGS_TYPE);
1859
1860            /* Link Capabilities, Status, and Control go away */
1861            if (size > PCI_EXP_LNKCTL) {
1862                vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP, 0, ~0);
1863                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1864                vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA, 0, ~0);
1865
1866#ifndef PCI_EXP_LNKCAP2
1867#define PCI_EXP_LNKCAP2 44
1868#endif
1869#ifndef PCI_EXP_LNKSTA2
1870#define PCI_EXP_LNKSTA2 50
1871#endif
1872                /* Link 2 Capabilities, Status, and Control go away */
1873                if (size > PCI_EXP_LNKCAP2) {
1874                    vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP2, 0, ~0);
1875                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL2, 0, ~0);
1876                    vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA2, 0, ~0);
1877                }
1878            }
1879
1880        } else if (type == PCI_EXP_TYPE_LEG_END) {
1881            /*
1882             * Legacy endpoints don't belong on the root complex.  Windows
1883             * seems to be happier with devices if we skip the capability.
1884             */
1885            return 0;
1886        }
1887
1888    } else {
1889        /*
1890         * Convert Root Complex Integrated Endpoints to regular endpoints.
1891         * These devices don't support LNK/LNK2 capabilities, so make them up.
1892         */
1893        if (type == PCI_EXP_TYPE_RC_END) {
1894            vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1895                                   PCI_EXP_TYPE_ENDPOINT << 4,
1896                                   PCI_EXP_FLAGS_TYPE);
1897            vfio_add_emulated_long(vdev, pos + PCI_EXP_LNKCAP,
1898                                   PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25, ~0);
1899            vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKCTL, 0, ~0);
1900        }
1901
1902        /* Mark the Link Status bits as emulated to allow virtual negotiation */
1903        vfio_add_emulated_word(vdev, pos + PCI_EXP_LNKSTA,
1904                               pci_get_word(vdev->pdev.config + pos +
1905                                            PCI_EXP_LNKSTA),
1906                               PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
1907    }
1908
1909    /*
1910     * Intel 82599 SR-IOV VFs report an invalid PCIe capability version 0
1911     * (Niantic errata #35), causing Windows to error with a Code 10 for the
1912     * device on Q35.  Fix up any such devices to report version 1.  If we
1913     * were to remove the capability entirely the guest would lose extended
1914     * config space.
1915     */
1916    if ((flags & PCI_EXP_FLAGS_VERS) == 0) {
1917        vfio_add_emulated_word(vdev, pos + PCI_CAP_FLAGS,
1918                               1, PCI_EXP_FLAGS_VERS);
1919    }
1920
1921    pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size,
1922                             errp);
1923    if (pos < 0) {
1924        return pos;
1925    }
1926
1927    vdev->pdev.exp.exp_cap = pos;
1928
1929    return pos;
1930}
1931
1932static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos)
1933{
1934    uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP);
1935
1936    if (cap & PCI_EXP_DEVCAP_FLR) {
1937        trace_vfio_check_pcie_flr(vdev->vbasedev.name);
1938        vdev->has_flr = true;
1939    }
1940}
1941
1942static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos)
1943{
1944    uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL);
1945
1946    if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) {
1947        trace_vfio_check_pm_reset(vdev->vbasedev.name);
1948        vdev->has_pm_reset = true;
1949    }
1950}
1951
1952static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos)
1953{
1954    uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP);
1955
1956    if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) {
1957        trace_vfio_check_af_flr(vdev->vbasedev.name);
1958        vdev->has_flr = true;
1959    }
1960}
1961
1962static int vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp)
1963{
1964    PCIDevice *pdev = &vdev->pdev;
1965    uint8_t cap_id, next, size;
1966    int ret;
1967
1968    cap_id = pdev->config[pos];
1969    next = pdev->config[pos + PCI_CAP_LIST_NEXT];
1970
1971    /*
1972     * If it becomes important to configure capabilities to their actual
1973     * size, use this as the default when it's something we don't recognize.
1974     * Since QEMU doesn't actually handle many of the config accesses,
1975     * exact size doesn't seem worthwhile.
1976     */
1977    size = vfio_std_cap_max_size(pdev, pos);
1978
1979    /*
1980     * pci_add_capability always inserts the new capability at the head
1981     * of the chain.  Therefore to end up with a chain that matches the
1982     * physical device, we insert from the end by making this recursive.
1983     * This is also why we pre-calculate size above as cached config space
1984     * will be changed as we unwind the stack.
1985     */
1986    if (next) {
1987        ret = vfio_add_std_cap(vdev, next, errp);
1988        if (ret) {
1989            return ret;
1990        }
1991    } else {
1992        /* Begin the rebuild, use QEMU emulated list bits */
1993        pdev->config[PCI_CAPABILITY_LIST] = 0;
1994        vdev->emulated_config_bits[PCI_CAPABILITY_LIST] = 0xff;
1995        vdev->emulated_config_bits[PCI_STATUS] |= PCI_STATUS_CAP_LIST;
1996
1997        ret = vfio_add_virt_caps(vdev, errp);
1998        if (ret) {
1999            return ret;
2000        }
2001    }
2002
2003    /* Scale down size, especially in case virt caps were added above */
2004    size = MIN(size, vfio_std_cap_max_size(pdev, pos));
2005
2006    /* Use emulated next pointer to allow dropping caps */
2007    pci_set_byte(vdev->emulated_config_bits + pos + PCI_CAP_LIST_NEXT, 0xff);
2008
2009    switch (cap_id) {
2010    case PCI_CAP_ID_MSI:
2011        ret = vfio_msi_setup(vdev, pos, errp);
2012        break;
2013    case PCI_CAP_ID_EXP:
2014        vfio_check_pcie_flr(vdev, pos);
2015        ret = vfio_setup_pcie_cap(vdev, pos, size, errp);
2016        break;
2017    case PCI_CAP_ID_MSIX:
2018        ret = vfio_msix_setup(vdev, pos, errp);
2019        break;
2020    case PCI_CAP_ID_PM:
2021        vfio_check_pm_reset(vdev, pos);
2022        vdev->pm_cap = pos;
2023        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2024        break;
2025    case PCI_CAP_ID_AF:
2026        vfio_check_af_flr(vdev, pos);
2027        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2028        break;
2029    default:
2030        ret = pci_add_capability(pdev, cap_id, pos, size, errp);
2031        break;
2032    }
2033
2034    if (ret < 0) {
2035        error_prepend(errp,
2036                      "failed to add PCI capability 0x%x[0x%x]@0x%x: ",
2037                      cap_id, size, pos);
2038        return ret;
2039    }
2040
2041    return 0;
2042}
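
/*
 * Illustrative sketch only (not called by this driver): why the recursion
 * above preserves capability order.  Assuming a physical chain A -> B -> C,
 * vfio_add_std_cap() recurses to the tail first and inserts on the way back
 * out (C, then B, then A); since each insertion goes to the head of the
 * chain, the rebuilt list ends up as A -> B -> C again.
 */
static inline void vfio_cap_order_example(void)
{
    const char *phys[] = { "A", "B", "C" };     /* physical chain order */
    const char *rebuilt[ARRAY_SIZE(phys)];
    int i, len = 0;

    for (i = ARRAY_SIZE(phys) - 1; i >= 0; i--) {
        /* Insert at the head of the rebuilt list, shifting entries down */
        memmove(&rebuilt[1], &rebuilt[0], len * sizeof(rebuilt[0]));
        rebuilt[0] = phys[i];
        len++;
    }

    /* rebuilt[] now reads { "A", "B", "C" }, matching the physical device */
    g_assert(!strcmp(rebuilt[0], "A") && !strcmp(rebuilt[2], "C"));
}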
2043
2044static void vfio_add_ext_cap(VFIOPCIDevice *vdev)
2045{
2046    PCIDevice *pdev = &vdev->pdev;
2047    uint32_t header;
2048    uint16_t cap_id, next, size;
2049    uint8_t cap_ver;
2050    uint8_t *config;
2051
2052    /* Only add extended caps if we have them and the guest can see them */
2053    if (!pci_is_express(pdev) || !pci_bus_is_express(pci_get_bus(pdev)) ||
2054        !pci_get_long(pdev->config + PCI_CONFIG_SPACE_SIZE)) {
2055        return;
2056    }
2057
2058    /*
2059     * pcie_add_capability always inserts the new capability at the tail
2060     * of the chain.  Therefore to end up with a chain that matches the
2061     * physical device, we cache the config space to avoid overwriting
2062     * the original config space when we parse the extended capabilities.
2063     */
2064    config = g_memdup(pdev->config, vdev->config_size);
2065
2066    /*
2067     * Extended capabilities are chained with each pointing to the next, so we
2068     * can drop anything other than the head of the chain simply by modifying
2069     * the previous next pointer.  Seed the head of the chain here such that
2070     * we can simply skip any capabilities we want to drop below, regardless
2071     * of their position in the chain.  If this stub capability still exists
2072     * after we add the capabilities we want to expose, update the capability
2073     * ID to zero.  Note that we cannot seed with the capability header being
2074     * zero as this conflicts with definition of an absent capability chain
2075     * and prevents capabilities beyond the head of the list from being added.
2076     * By replacing the dummy capability ID with zero after walking the device
2077     * chain, we also transparently mark extended capabilities as absent if
2078     * no capabilities were added.  Note that the PCIe spec defines an absence
2079     * of extended capabilities to be determined by a value of zero for the
2080     * capability ID, version, AND next pointer.  A non-zero next pointer
2081     * should be sufficient to indicate additional capabilities are present,
2082     * which will occur if we call pcie_add_capability() below.  The entire
2083     * first dword is emulated to support this.
2084     *
2085     * NB. The kernel side does similar masking, so be prepared that our
2086     * view of the device may also contain a capability ID zero in the head
2087     * of the chain.  Skip it for the same reason that we cannot seed the
2088     * chain with a zero capability.
2089     */
2090    pci_set_long(pdev->config + PCI_CONFIG_SPACE_SIZE,
2091                 PCI_EXT_CAP(0xFFFF, 0, 0));
2092    pci_set_long(pdev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
2093    pci_set_long(vdev->emulated_config_bits + PCI_CONFIG_SPACE_SIZE, ~0);
2094
2095    for (next = PCI_CONFIG_SPACE_SIZE; next;
2096         next = PCI_EXT_CAP_NEXT(pci_get_long(config + next))) {
2097        header = pci_get_long(config + next);
2098        cap_id = PCI_EXT_CAP_ID(header);
2099        cap_ver = PCI_EXT_CAP_VER(header);
2100
2101        /*
2102         * If it becomes important to configure extended capabilities to their
2103         * actual size, use this as the default when it's something we don't
2104         * recognize. Since QEMU doesn't actually handle many of the config
2105         * accesses, exact size doesn't seem worthwhile.
2106         */
2107        size = vfio_ext_cap_max_size(config, next);
2108
2109        /* Use emulated next pointer to allow dropping extended caps */
2110        pci_long_test_and_set_mask(vdev->emulated_config_bits + next,
2111                                   PCI_EXT_CAP_NEXT_MASK);
2112
2113        switch (cap_id) {
2114        case 0: /* kernel masked capability */
2115        case PCI_EXT_CAP_ID_SRIOV: /* Read-only VF BARs confuse OVMF */
2116        case PCI_EXT_CAP_ID_ARI: /* XXX Needs next function virtualization */
2117            trace_vfio_add_ext_cap_dropped(vdev->vbasedev.name, cap_id, next);
2118            break;
2119        default:
2120            pcie_add_capability(pdev, cap_id, cap_ver, next, size);
2121        }
2122
2123    }
2124
2125    /* Cleanup chain head ID if necessary */
2126    if (pci_get_word(pdev->config + PCI_CONFIG_SPACE_SIZE) == 0xFFFF) {
2127        pci_set_word(pdev->config + PCI_CONFIG_SPACE_SIZE, 0);
2128    }
2129
2130    g_free(config);
2131    return;
2132}
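
/*
 * Illustrative sketch only (not called by this driver): the layout of the
 * extended capability header dword manipulated above.  Per the PCIe spec,
 * bits 0-15 hold the capability ID, bits 16-19 the version, and bits 20-31
 * the next pointer, so the PCI_EXT_CAP(0xFFFF, 0, 0) seed used above is a
 * dummy ID with no version and no next capability.
 */
static inline uint32_t vfio_ext_cap_header_example(uint16_t cap_id,
                                                   uint8_t cap_ver,
                                                   uint16_t next)
{
    return cap_id | ((uint32_t)cap_ver << 16) | ((uint32_t)next << 20);
}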
2133
2134static int vfio_add_capabilities(VFIOPCIDevice *vdev, Error **errp)
2135{
2136    PCIDevice *pdev = &vdev->pdev;
2137    int ret;
2138
2139    if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) ||
2140        !pdev->config[PCI_CAPABILITY_LIST]) {
2141        return 0; /* Nothing to add */
2142    }
2143
2144    ret = vfio_add_std_cap(vdev, pdev->config[PCI_CAPABILITY_LIST], errp);
2145    if (ret) {
2146        return ret;
2147    }
2148
2149    vfio_add_ext_cap(vdev);
2150    return 0;
2151}
2152
2153static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
2154{
2155    PCIDevice *pdev = &vdev->pdev;
2156    uint16_t cmd;
2157
2158    vfio_disable_interrupts(vdev);
2159
2160    /* Make sure the device is in D0 */
2161    if (vdev->pm_cap) {
2162        uint16_t pmcsr;
2163        uint8_t state;
2164
2165        pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2166        state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2167        if (state) {
2168            pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2169            vfio_pci_write_config(pdev, vdev->pm_cap + PCI_PM_CTRL, pmcsr, 2);
2170            /* vfio handles the necessary delay here */
2171            pmcsr = vfio_pci_read_config(pdev, vdev->pm_cap + PCI_PM_CTRL, 2);
2172            state = pmcsr & PCI_PM_CTRL_STATE_MASK;
2173            if (state) {
2174                error_report("vfio: Unable to power on device, stuck in D%d",
2175                             state);
2176            }
2177        }
2178    }
2179
2180    /*
2181     * Stop any ongoing DMA by disconnecting I/O, MMIO, and bus master.
2182     * Also put INTx Disable in a known state.
2183     */
2184    cmd = vfio_pci_read_config(pdev, PCI_COMMAND, 2);
2185    cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
2186             PCI_COMMAND_INTX_DISABLE);
2187    vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
2188}
2189
2190static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
2191{
2192    Error *err = NULL;
2193    int nr;
2194
2195    vfio_intx_enable(vdev, &err);
2196    if (err) {
2197        error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
2198    }
2199
2200    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
2201        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
2202        uint32_t val = 0;
2203        uint32_t len = sizeof(val);
2204
2205        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
2206            error_report("%s(%s) reset bar %d failed: %m", __func__,
2207                         vdev->vbasedev.name, nr);
2208        }
2209    }
2210}
2211
2212static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
2213{
2214    char tmp[13];
2215
2216    sprintf(tmp, "%04x:%02x:%02x.%1x", addr->domain,
2217            addr->bus, addr->slot, addr->function);
2218
2219    return (strcmp(tmp, name) == 0);
2220}
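
/*
 * Hedged alternative sketch (not the code used above): the fixed 13-byte
 * buffer above is just large enough for the canonical "dddd:bb:ss.f" form
 * plus the terminating NUL.  Building the string dynamically avoids having
 * to reason about the buffer size at all; this variant is illustrative only.
 */
static inline bool vfio_pci_host_match_alt(PCIHostDeviceAddress *addr,
                                           const char *name)
{
    char *tmp = g_strdup_printf("%04x:%02x:%02x.%1x", addr->domain,
                                addr->bus, addr->slot, addr->function);
    bool match = (strcmp(tmp, name) == 0);

    g_free(tmp);
    return match;
}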
2221
2222static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
2223{
2224    VFIOGroup *group;
2225    struct vfio_pci_hot_reset_info *info;
2226    struct vfio_pci_dependent_device *devices;
2227    struct vfio_pci_hot_reset *reset;
2228    int32_t *fds;
2229    int ret, i, count;
2230    bool multi = false;
2231
2232    trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
2233
2234    if (!single) {
2235        vfio_pci_pre_reset(vdev);
2236    }
2237    vdev->vbasedev.needs_reset = false;
2238
2239    info = g_malloc0(sizeof(*info));
2240    info->argsz = sizeof(*info);
2241
2242    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2243    if (ret && errno != ENOSPC) {
2244        ret = -errno;
2245        if (!vdev->has_pm_reset) {
2246            error_report("vfio: Cannot reset device %s, "
2247                         "no available reset mechanism.", vdev->vbasedev.name);
2248        }
2249        goto out_single;
2250    }
2251
2252    count = info->count;
2253    info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
2254    info->argsz = sizeof(*info) + (count * sizeof(*devices));
2255    devices = &info->devices[0];
2256
2257    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
2258    if (ret) {
2259        ret = -errno;
2260        error_report("vfio: hot reset info failed: %m");
2261        goto out_single;
2262    }
2263
2264    trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
2265
2266    /* Verify that we have all the groups required */
2267    for (i = 0; i < info->count; i++) {
2268        PCIHostDeviceAddress host;
2269        VFIOPCIDevice *tmp;
2270        VFIODevice *vbasedev_iter;
2271
2272        host.domain = devices[i].segment;
2273        host.bus = devices[i].bus;
2274        host.slot = PCI_SLOT(devices[i].devfn);
2275        host.function = PCI_FUNC(devices[i].devfn);
2276
2277        trace_vfio_pci_hot_reset_dep_devices(host.domain,
2278                host.bus, host.slot, host.function, devices[i].group_id);
2279
2280        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2281            continue;
2282        }
2283
2284        QLIST_FOREACH(group, &vfio_group_list, next) {
2285            if (group->groupid == devices[i].group_id) {
2286                break;
2287            }
2288        }
2289
2290        if (!group) {
2291            if (!vdev->has_pm_reset) {
2292                error_report("vfio: Cannot reset device %s, "
2293                             "depends on group %d which is not owned.",
2294                             vdev->vbasedev.name, devices[i].group_id);
2295            }
2296            ret = -EPERM;
2297            goto out;
2298        }
2299
2300        /* Prep dependent devices for reset and clear our marker. */
2301        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2302            if (!vbasedev_iter->dev->realized ||
2303                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2304                continue;
2305            }
2306            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2307            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2308                if (single) {
2309                    ret = -EINVAL;
2310                    goto out_single;
2311                }
2312                vfio_pci_pre_reset(tmp);
2313                tmp->vbasedev.needs_reset = false;
2314                multi = true;
2315                break;
2316            }
2317        }
2318    }
2319
2320    if (!single && !multi) {
2321        ret = -EINVAL;
2322        goto out_single;
2323    }
2324
2325    /* Determine how many group fds need to be passed */
2326    count = 0;
2327    QLIST_FOREACH(group, &vfio_group_list, next) {
2328        for (i = 0; i < info->count; i++) {
2329            if (group->groupid == devices[i].group_id) {
2330                count++;
2331                break;
2332            }
2333        }
2334    }
2335
2336    reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
2337    reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
2338    fds = &reset->group_fds[0];
2339
2340    /* Fill in group fds */
2341    QLIST_FOREACH(group, &vfio_group_list, next) {
2342        for (i = 0; i < info->count; i++) {
2343            if (group->groupid == devices[i].group_id) {
2344                fds[reset->count++] = group->fd;
2345                break;
2346            }
2347        }
2348    }
2349
2350    /* Bus reset! */
2351    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
2352    g_free(reset);
2353
2354    trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
2355                                    ret ? "%m" : "Success");
2356
2357out:
2358    /* Re-enable INTx on affected devices */
2359    for (i = 0; i < info->count; i++) {
2360        PCIHostDeviceAddress host;
2361        VFIOPCIDevice *tmp;
2362        VFIODevice *vbasedev_iter;
2363
2364        host.domain = devices[i].segment;
2365        host.bus = devices[i].bus;
2366        host.slot = PCI_SLOT(devices[i].devfn);
2367        host.function = PCI_FUNC(devices[i].devfn);
2368
2369        if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
2370            continue;
2371        }
2372
2373        QLIST_FOREACH(group, &vfio_group_list, next) {
2374            if (group->groupid == devices[i].group_id) {
2375                break;
2376            }
2377        }
2378
2379        if (!group) {
2380            break;
2381        }
2382
2383        QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2384            if (!vbasedev_iter->dev->realized ||
2385                vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
2386                continue;
2387            }
2388            tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
2389            if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
2390                vfio_pci_post_reset(tmp);
2391                break;
2392            }
2393        }
2394    }
2395out_single:
2396    if (!single) {
2397        vfio_pci_post_reset(vdev);
2398    }
2399    g_free(info);
2400
2401    return ret;
2402}
2403
2404/*
2405 * We want to differentiate hot reset of multiple in-use devices vs hot reset
2406 * of a single in-use device.  VFIO_DEVICE_RESET will already handle the case
2407 * of doing hot resets when there is only a single device per bus.  The in-use
2408 * here refers to how many VFIODevices are affected.  A hot reset that affects
2409 * multiple devices, but only a single in-use device, means that we can call
2410 * it from our bus ->reset() callback since the extent is effectively a single
2411 * device.  This allows us to make use of it in the hotplug path.  When there
2412 * are multiple in-use devices, we can only trigger the hot reset during a
2413 * system reset and thus from our reset handler.  We separate _one vs _multi
2414 * here so that we don't overlap and do a double reset on the system reset
2415 * path where both our reset handler and ->reset() callback are used.  Calling
2416 * _one() will only do a hot reset for the one in-use devices case, calling
2417 * _one() will only do a hot reset for the single in-use device case; calling
2418 */
2419static int vfio_pci_hot_reset_one(VFIOPCIDevice *vdev)
2420{
2421    return vfio_pci_hot_reset(vdev, true);
2422}
2423
2424static int vfio_pci_hot_reset_multi(VFIODevice *vbasedev)
2425{
2426    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2427    return vfio_pci_hot_reset(vdev, false);
2428}
2429
2430static void vfio_pci_compute_needs_reset(VFIODevice *vbasedev)
2431{
2432    VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
2433    if (!vbasedev->reset_works || (!vdev->has_flr && vdev->has_pm_reset)) {
2434        vbasedev->needs_reset = true;
2435    }
2436}
2437
2438static VFIODeviceOps vfio_pci_ops = {
2439    .vfio_compute_needs_reset = vfio_pci_compute_needs_reset,
2440    .vfio_hot_reset_multi = vfio_pci_hot_reset_multi,
2441    .vfio_eoi = vfio_intx_eoi,
2442};
2443
2444int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp)
2445{
2446    VFIODevice *vbasedev = &vdev->vbasedev;
2447    struct vfio_region_info *reg_info;
2448    int ret;
2449
2450    ret = vfio_get_region_info(vbasedev, VFIO_PCI_VGA_REGION_INDEX, &reg_info);
2451    if (ret) {
2452        error_setg_errno(errp, -ret,
2453                         "failed getting region info for VGA region index %d",
2454                         VFIO_PCI_VGA_REGION_INDEX);
2455        return ret;
2456    }
2457
2458    if (!(reg_info->flags & VFIO_REGION_INFO_FLAG_READ) ||
2459        !(reg_info->flags & VFIO_REGION_INFO_FLAG_WRITE) ||
2460        reg_info->size < 0xbffff + 1) {
2461        error_setg(errp, "unexpected VGA info, flags 0x%lx, size 0x%lx",
2462                   (unsigned long)reg_info->flags,
2463                   (unsigned long)reg_info->size);
2464        g_free(reg_info);
2465        return -EINVAL;
2466    }
2467
2468    vdev->vga = g_new0(VFIOVGA, 1);
2469
2470    vdev->vga->fd_offset = reg_info->offset;
2471    vdev->vga->fd = vdev->vbasedev.fd;
2472
2473    g_free(reg_info);
2474
2475    vdev->vga->region[QEMU_PCI_VGA_MEM].offset = QEMU_PCI_VGA_MEM_BASE;
2476    vdev->vga->region[QEMU_PCI_VGA_MEM].nr = QEMU_PCI_VGA_MEM;
2477    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_MEM].quirks);
2478
2479    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2480                          OBJECT(vdev), &vfio_vga_ops,
2481                          &vdev->vga->region[QEMU_PCI_VGA_MEM],
2482                          "vfio-vga-mmio@0xa0000",
2483                          QEMU_PCI_VGA_MEM_SIZE);
2484
2485    vdev->vga->region[QEMU_PCI_VGA_IO_LO].offset = QEMU_PCI_VGA_IO_LO_BASE;
2486    vdev->vga->region[QEMU_PCI_VGA_IO_LO].nr = QEMU_PCI_VGA_IO_LO;
2487    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].quirks);
2488
2489    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2490                          OBJECT(vdev), &vfio_vga_ops,
2491                          &vdev->vga->region[QEMU_PCI_VGA_IO_LO],
2492                          "vfio-vga-io@0x3b0",
2493                          QEMU_PCI_VGA_IO_LO_SIZE);
2494
2495    vdev->vga->region[QEMU_PCI_VGA_IO_HI].offset = QEMU_PCI_VGA_IO_HI_BASE;
2496    vdev->vga->region[QEMU_PCI_VGA_IO_HI].nr = QEMU_PCI_VGA_IO_HI;
2497    QLIST_INIT(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].quirks);
2498
2499    memory_region_init_io(&vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem,
2500                          OBJECT(vdev), &vfio_vga_ops,
2501                          &vdev->vga->region[QEMU_PCI_VGA_IO_HI],
2502                          "vfio-vga-io@0x3c0",
2503                          QEMU_PCI_VGA_IO_HI_SIZE);
2504
2505    pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem,
2506                     &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem,
2507                     &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem);
2508
2509    return 0;
2510}
2511
2512static void vfio_populate_device(VFIOPCIDevice *vdev, Error **errp)
2513{
2514    VFIODevice *vbasedev = &vdev->vbasedev;
2515    struct vfio_region_info *reg_info;
2516    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
2517    int i, ret = -1;
2518
2519    /* Sanity check device */
2520    if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PCI)) {
2521        error_setg(errp, "this isn't a PCI device");
2522        return;
2523    }
2524
2525    if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
2526        error_setg(errp, "unexpected number of io regions %u",
2527                   vbasedev->num_regions);
2528        return;
2529    }
2530
2531    if (vbasedev->num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
2532        error_setg(errp, "unexpected number of irqs %u", vbasedev->num_irqs);
2533        return;
2534    }
2535
2536    for (i = VFIO_PCI_BAR0_REGION_INDEX; i < VFIO_PCI_ROM_REGION_INDEX; i++) {
2537        char *name = g_strdup_printf("%s BAR %d", vbasedev->name, i);
2538
2539        ret = vfio_region_setup(OBJECT(vdev), vbasedev,
2540                                &vdev->bars[i].region, i, name);
2541        g_free(name);
2542
2543        if (ret) {
2544            error_setg_errno(errp, -ret, "failed to get region %d info", i);
2545            return;
2546        }
2547
2548        QLIST_INIT(&vdev->bars[i].quirks);
2549    }
2550
2551    ret = vfio_get_region_info(vbasedev,
2552                               VFIO_PCI_CONFIG_REGION_INDEX, &reg_info);
2553    if (ret) {
2554        error_setg_errno(errp, -ret, "failed to get config info");
2555        return;
2556    }
2557
2558    trace_vfio_populate_device_config(vdev->vbasedev.name,
2559                                      (unsigned long)reg_info->size,
2560                                      (unsigned long)reg_info->offset,
2561                                      (unsigned long)reg_info->flags);
2562
2563    vdev->config_size = reg_info->size;
2564    if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) {
2565        vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS;
2566    }
2567    vdev->config_offset = reg_info->offset;
2568
2569    g_free(reg_info);
2570
2571    if (vdev->features & VFIO_FEATURE_ENABLE_VGA) {
2572        ret = vfio_populate_vga(vdev, errp);
2573        if (ret) {
2574            error_append_hint(errp, "device does not support "
2575                              "requested feature x-vga\n");
2576            return;
2577        }
2578    }
2579
2580    irq_info.index = VFIO_PCI_ERR_IRQ_INDEX;
2581
2582    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
2583    if (ret) {
2584        /* This can fail for an old kernel or legacy PCI dev */
2585        trace_vfio_populate_device_get_irq_info_failure();
2586    } else if (irq_info.count == 1) {
2587        vdev->pci_aer = true;
2588    } else {
2589        error_report(WARN_PREFIX
2590                     "Could not enable error recovery for the device",
2591                     vbasedev->name);
2592    }
2593}
2594
2595static void vfio_put_device(VFIOPCIDevice *vdev)
2596{
2597    g_free(vdev->vbasedev.name);
2598    g_free(vdev->msix);
2599
2600    vfio_put_base_device(&vdev->vbasedev);
2601}
2602
2603static void vfio_err_notifier_handler(void *opaque)
2604{
2605    VFIOPCIDevice *vdev = opaque;
2606
2607    if (!event_notifier_test_and_clear(&vdev->err_notifier)) {
2608        return;
2609    }
2610
2611    /*
2612     * TBD. Retrieve the error details and decide what action
2613     * needs to be taken. One of the actions could be to pass
2614     * the error to the guest and have the guest driver recover
2615     * from the error. This requires that PCIe capabilities be
2616     * exposed to the guest. For now, we just stop the guest
2617     * to contain the error.
2618     */
2619
2620    error_report("%s(%s) Unrecoverable error detected. Please collect any data possible and then kill the guest", __func__, vdev->vbasedev.name);
2621
2622    vm_stop(RUN_STATE_INTERNAL_ERROR);
2623}
2624
2625/*
2626 * Registers error notifier for devices supporting error recovery.
2627 * If we encounter a failure in this function, we report an error
2628 * and continue after disabling error recovery support for the
2629 * device.
2630 */
2631static void vfio_register_err_notifier(VFIOPCIDevice *vdev)
2632{
2633    int ret;
2634    int argsz;
2635    struct vfio_irq_set *irq_set;
2636    int32_t *pfd;
2637
2638    if (!vdev->pci_aer) {
2639        return;
2640    }
2641
2642    if (event_notifier_init(&vdev->err_notifier, 0)) {
2643        error_report("vfio: Unable to init event notifier for error detection");
2644        vdev->pci_aer = false;
2645        return;
2646    }
2647
2648    argsz = sizeof(*irq_set) + sizeof(*pfd);
2649
2650    irq_set = g_malloc0(argsz);
2651    irq_set->argsz = argsz;
2652    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2653                     VFIO_IRQ_SET_ACTION_TRIGGER;
2654    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2655    irq_set->start = 0;
2656    irq_set->count = 1;
2657    pfd = (int32_t *)&irq_set->data;
2658
2659    *pfd = event_notifier_get_fd(&vdev->err_notifier);
2660    qemu_set_fd_handler(*pfd, vfio_err_notifier_handler, NULL, vdev);
2661
2662    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2663    if (ret) {
2664        error_report("vfio: Failed to set up error notification");
2665        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2666        event_notifier_cleanup(&vdev->err_notifier);
2667        vdev->pci_aer = false;
2668    }
2669    g_free(irq_set);
2670}
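
/*
 * Hedged sketch only: the eventfd wiring above, and in the request notifier
 * code below, follows the same VFIO_DEVICE_SET_IRQS pattern, which could be
 * expressed as a single helper along these lines.  The helper name and
 * signature are assumptions for illustration, not an existing QEMU API.
 */
static inline int vfio_set_single_irq_fd(int device_fd, uint32_t index, int fd)
{
    struct vfio_irq_set *irq_set;
    int argsz = sizeof(*irq_set) + sizeof(int32_t);
    int32_t *pfd;
    int ret;

    irq_set = g_malloc0(argsz);
    irq_set->argsz = argsz;
    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
    irq_set->index = index;
    irq_set->start = 0;
    irq_set->count = 1;
    pfd = (int32_t *)&irq_set->data;
    *pfd = fd;                            /* -1 de-assigns the eventfd */

    ret = ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set) ? -errno : 0;
    g_free(irq_set);
    return ret;
}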
2671
2672static void vfio_unregister_err_notifier(VFIOPCIDevice *vdev)
2673{
2674    int argsz;
2675    struct vfio_irq_set *irq_set;
2676    int32_t *pfd;
2677    int ret;
2678
2679    if (!vdev->pci_aer) {
2680        return;
2681    }
2682
2683    argsz = sizeof(*irq_set) + sizeof(*pfd);
2684
2685    irq_set = g_malloc0(argsz);
2686    irq_set->argsz = argsz;
2687    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2688                     VFIO_IRQ_SET_ACTION_TRIGGER;
2689    irq_set->index = VFIO_PCI_ERR_IRQ_INDEX;
2690    irq_set->start = 0;
2691    irq_set->count = 1;
2692    pfd = (int32_t *)&irq_set->data;
2693    *pfd = -1;
2694
2695    ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set);
2696    if (ret) {
2697        error_report("vfio: Failed to de-assign error fd: %m");
2698    }
2699    g_free(irq_set);
2700    qemu_set_fd_handler(event_notifier_get_fd(&vdev->err_notifier),
2701                        NULL, NULL, vdev);
2702    event_notifier_cleanup(&vdev->err_notifier);
2703}
2704
2705static void vfio_req_notifier_handler(void *opaque)
2706{
2707    VFIOPCIDevice *vdev = opaque;
2708    Error *err = NULL;
2709
2710    if (!event_notifier_test_and_clear(&vdev->req_notifier)) {
2711        return;
2712    }
2713
2714    qdev_unplug(&vdev->pdev.qdev, &err);
2715    if (err) {
2716        error_reportf_err(err, WARN_PREFIX, vdev->vbasedev.name);
2717    }
2718}
2719
2720static void vfio_register_req_notifier(VFIOPCIDevice *vdev)
2721{
2722    struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info),
2723                                      .index = VFIO_PCI_REQ_IRQ_INDEX };
2724    int argsz;
2725    struct vfio_irq_set *irq_set;
2726    int32_t *pfd;
2727
2728    if (!(vdev->features & VFIO_FEATURE_ENABLE_REQ)) {
2729        return;
2730    }
2731
2732    if (ioctl(vdev->vbasedev.fd,
2733              VFIO_DEVICE_GET_IRQ_INFO, &irq_info) < 0 || irq_info.count < 1) {
2734        return;
2735    }
2736
2737    if (event_notifier_init(&vdev->req_notifier, 0)) {
2738        error_report("vfio: Unable to init event notifier for device request");
2739        return;
2740    }
2741
2742    argsz = sizeof(*irq_set) + sizeof(*pfd);
2743
2744    irq_set = g_malloc0(argsz);
2745    irq_set->argsz = argsz;
2746    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2747                     VFIO_IRQ_SET_ACTION_TRIGGER;
2748    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2749    irq_set->start = 0;
2750    irq_set->count = 1;
2751    pfd = (int32_t *)&irq_set->data;
2752
2753    *pfd = event_notifier_get_fd(&vdev->req_notifier);
2754    qemu_set_fd_handler(*pfd, vfio_req_notifier_handler, NULL, vdev);
2755
2756    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2757        error_report("vfio: Failed to set up device request notification");
2758        qemu_set_fd_handler(*pfd, NULL, NULL, vdev);
2759        event_notifier_cleanup(&vdev->req_notifier);
2760    } else {
2761        vdev->req_enabled = true;
2762    }
2763
2764    g_free(irq_set);
2765}
2766
2767static void vfio_unregister_req_notifier(VFIOPCIDevice *vdev)
2768{
2769    int argsz;
2770    struct vfio_irq_set *irq_set;
2771    int32_t *pfd;
2772
2773    if (!vdev->req_enabled) {
2774        return;
2775    }
2776
2777    argsz = sizeof(*irq_set) + sizeof(*pfd);
2778
2779    irq_set = g_malloc0(argsz);
2780    irq_set->argsz = argsz;
2781    irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
2782                     VFIO_IRQ_SET_ACTION_TRIGGER;
2783    irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
2784    irq_set->start = 0;
2785    irq_set->count = 1;
2786    pfd = (int32_t *)&irq_set->data;
2787    *pfd = -1;
2788
2789    if (ioctl(vdev->vbasedev.fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
2790        error_report("vfio: Failed to de-assign device request fd: %m");
2791    }
2792    g_free(irq_set);
2793    qemu_set_fd_handler(event_notifier_get_fd(&vdev->req_notifier),
2794                        NULL, NULL, vdev);
2795    event_notifier_cleanup(&vdev->req_notifier);
2796
2797    vdev->req_enabled = false;
2798}
2799
2800static void vfio_realize(PCIDevice *pdev, Error **errp)
2801{
2802    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
2803    VFIODevice *vbasedev_iter;
2804    VFIOGroup *group;
2805    char *tmp, group_path[PATH_MAX], *group_name;
2806    Error *err = NULL;
2807    ssize_t len;
2808    struct stat st;
2809    int groupid;
2810    int i, ret;
2811
2812    if (!vdev->vbasedev.sysfsdev) {
2813        if (!(~vdev->host.domain || ~vdev->host.bus ||
2814              ~vdev->host.slot || ~vdev->host.function)) {
2815            error_setg(errp, "No provided host device");
2816            error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
2817                              "or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
2818            return;
2819        }
2820        vdev->vbasedev.sysfsdev =
2821            g_strdup_printf("/sys/bus/pci/devices/%04x:%02x:%02x.%01x",
2822                            vdev->host.domain, vdev->host.bus,
2823                            vdev->host.slot, vdev->host.function);
2824    }
2825
2826    if (stat(vdev->vbasedev.sysfsdev, &st) < 0) {
2827        error_setg_errno(errp, errno, "no such host device");
2828        error_prepend(errp, ERR_PREFIX, vdev->vbasedev.sysfsdev);
2829        return;
2830    }
2831
2832    vdev->vbasedev.name = g_path_get_basename(vdev->vbasedev.sysfsdev);
2833    vdev->vbasedev.ops = &vfio_pci_ops;
2834    vdev->vbasedev.type = VFIO_DEVICE_TYPE_PCI;
2835    vdev->vbasedev.dev = &vdev->pdev.qdev;
2836
2837    tmp = g_strdup_printf("%s/iommu_group", vdev->vbasedev.sysfsdev);
2838    len = readlink(tmp, group_path, sizeof(group_path));
2839    g_free(tmp);
2840
2841    if (len <= 0 || len >= sizeof(group_path)) {
2842        error_setg_errno(errp, len < 0 ? errno : ENAMETOOLONG,
2843                         "no iommu_group found");
2844        goto error;
2845    }
2846
2847    group_path[len] = 0;
2848
2849    group_name = basename(group_path);
2850    if (sscanf(group_name, "%d", &groupid) != 1) {
2851        error_setg_errno(errp, errno, "failed to read %s", group_path);
2852        goto error;
2853    }
2854
2855    trace_vfio_realize(vdev->vbasedev.name, groupid);
2856
2857    group = vfio_get_group(groupid, pci_device_iommu_address_space(pdev), errp);
2858    if (!group) {
2859        goto error;
2860    }
2861
2862    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
2863        if (strcmp(vbasedev_iter->name, vdev->vbasedev.name) == 0) {
2864            error_setg(errp, "device is already attached");
2865            vfio_put_group(group);
2866            goto error;
2867        }
2868    }
2869
2870    ret = vfio_get_device(group, vdev->vbasedev.name, &vdev->vbasedev, errp);
2871    if (ret) {
2872        vfio_put_group(group);
2873        goto error;
2874    }
2875
2876    vfio_populate_device(vdev, &err);
2877    if (err) {
2878        error_propagate(errp, err);
2879        goto error;
2880    }
2881
2882    /* Get a copy of config space */
2883    ret = pread(vdev->vbasedev.fd, vdev->pdev.config,
2884                MIN(pci_config_size(&vdev->pdev), vdev->config_size),
2885                vdev->config_offset);
2886    if (ret < (int)MIN(pci_config_size(&vdev->pdev), vdev->config_size)) {
2887        ret = ret < 0 ? -errno : -EFAULT;
2888        error_setg_errno(errp, -ret, "failed to read device config space");
2889        goto error;
2890    }
2891
2892    /* vfio emulates a lot for us, but some bits need extra love */
2893    vdev->emulated_config_bits = g_malloc0(vdev->config_size);
2894
2895    /* QEMU can choose to expose the ROM or not */
2896    memset(vdev->emulated_config_bits + PCI_ROM_ADDRESS, 0xff, 4);
2897    /* QEMU can also add or extend BARs */
2898    memset(vdev->emulated_config_bits + PCI_BASE_ADDRESS_0, 0xff, 6 * 4);
2899
2900    /*
2901     * The PCI spec reserves vendor ID 0xffff as an invalid value.  The
2902     * device ID is managed by the vendor and need only be a 16-bit value.
2903     * Allow any 16-bit value for subsystem so they can be hidden or changed.
2904     */
2905    if (vdev->vendor_id != PCI_ANY_ID) {
2906        if (vdev->vendor_id >= 0xffff) {
2907            error_setg(errp, "invalid PCI vendor ID provided");
2908            goto error;
2909        }
2910        vfio_add_emulated_word(vdev, PCI_VENDOR_ID, vdev->vendor_id, ~0);
2911        trace_vfio_pci_emulated_vendor_id(vdev->vbasedev.name, vdev->vendor_id);
2912    } else {
2913        vdev->vendor_id = pci_get_word(pdev->config + PCI_VENDOR_ID);
2914    }
2915
2916    if (vdev->device_id != PCI_ANY_ID) {
2917        if (vdev->device_id > 0xffff) {
2918            error_setg(errp, "invalid PCI device ID provided");
2919            goto error;
2920        }
2921        vfio_add_emulated_word(vdev, PCI_DEVICE_ID, vdev->device_id, ~0);
2922        trace_vfio_pci_emulated_device_id(vdev->vbasedev.name, vdev->device_id);
2923    } else {
2924        vdev->device_id = pci_get_word(pdev->config + PCI_DEVICE_ID);
2925    }
2926
2927    if (vdev->sub_vendor_id != PCI_ANY_ID) {
2928        if (vdev->sub_vendor_id > 0xffff) {
2929            error_setg(errp, "invalid PCI subsystem vendor ID provided");
2930            goto error;
2931        }
2932        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_VENDOR_ID,
2933                               vdev->sub_vendor_id, ~0);
2934        trace_vfio_pci_emulated_sub_vendor_id(vdev->vbasedev.name,
2935                                              vdev->sub_vendor_id);
2936    }
2937
2938    if (vdev->sub_device_id != PCI_ANY_ID) {
2939        if (vdev->sub_device_id > 0xffff) {
2940            error_setg(errp, "invalid PCI subsystem device ID provided");
2941            goto error;
2942        }
2943        vfio_add_emulated_word(vdev, PCI_SUBSYSTEM_ID, vdev->sub_device_id, ~0);
2944        trace_vfio_pci_emulated_sub_device_id(vdev->vbasedev.name,
2945                                              vdev->sub_device_id);
2946    }
2947
2948    /* QEMU can change multi-function devices to single function, or reverse */
2949    vdev->emulated_config_bits[PCI_HEADER_TYPE] =
2950                                              PCI_HEADER_TYPE_MULTI_FUNCTION;
2951
2952    /* Restore or clear multifunction, this is always controlled by QEMU */
2953    if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) {
2954        vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION;
2955    } else {
2956        vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION;
2957    }
2958
2959    /*
2960     * Clear host resource mapping info.  If we choose not to register a
2961     * BAR, such as might be the case with the option ROM, we can get
2962     * confusing, unwritable, residual addresses from the host here.
2963     */
2964    memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24);
2965    memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4);
2966
2967    vfio_pci_size_rom(vdev);
2968
2969    vfio_bars_prepare(vdev);
2970
2971    vfio_msix_early_setup(vdev, &err);
2972    if (err) {
2973        error_propagate(errp, err);
2974        goto error;
2975    }
2976
2977    vfio_bars_register(vdev);
2978
2979    ret = vfio_add_capabilities(vdev, errp);
2980    if (ret) {
2981        goto out_teardown;
2982    }
2983
2984    if (vdev->vga) {
2985        vfio_vga_quirk_setup(vdev);
2986    }
2987
2988    for (i = 0; i < PCI_ROM_SLOT; i++) {
2989        vfio_bar_quirk_setup(vdev, i);
2990    }
2991
2992    if (!vdev->igd_opregion &&
2993        vdev->features & VFIO_FEATURE_ENABLE_IGD_OPREGION) {
2994        struct vfio_region_info *opregion;
2995
2996        if (vdev->pdev.qdev.hotplugged) {
2997            error_setg(errp,
2998                       "cannot support IGD OpRegion feature on hotplugged "
2999                       "device");
3000            goto out_teardown;
3001        }
3002
3003        ret = vfio_get_dev_region_info(&vdev->vbasedev,
3004                        VFIO_REGION_TYPE_PCI_VENDOR_TYPE | PCI_VENDOR_ID_INTEL,
3005                        VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &opregion);
3006        if (ret) {
3007            error_setg_errno(errp, -ret,
3008                             "does not support requested IGD OpRegion feature");
3009            goto out_teardown;
3010        }
3011
3012        ret = vfio_pci_igd_opregion_init(vdev, opregion, errp);
3013        g_free(opregion);
3014        if (ret) {
3015            goto out_teardown;
3016        }
3017    }
3018
3019    /* QEMU emulates all of MSI & MSIX */
3020    if (pdev->cap_present & QEMU_PCI_CAP_MSIX) {
3021        memset(vdev->emulated_config_bits + pdev->msix_cap, 0xff,
3022               MSIX_CAP_LENGTH);
3023    }
3024
3025    if (pdev->cap_present & QEMU_PCI_CAP_MSI) {
3026        memset(vdev->emulated_config_bits + pdev->msi_cap, 0xff,
3027               vdev->msi_cap_size);
3028    }
3029
3030    if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) {
3031        vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
3032                                                  vfio_intx_mmap_enable, vdev);
3033        pci_device_set_intx_routing_notifier(&vdev->pdev, vfio_intx_update);
3034        ret = vfio_intx_enable(vdev, errp);
3035        if (ret) {
3036            goto out_teardown;
3037        }
3038    }
3039
3040    if (vdev->display != ON_OFF_AUTO_OFF) {
3041        ret = vfio_display_probe(vdev, errp);
3042        if (ret) {
3043            goto out_teardown;
3044        }
3045    }
3046
3047    vfio_register_err_notifier(vdev);
3048    vfio_register_req_notifier(vdev);
3049    vfio_setup_resetfn_quirk(vdev);
3050
3051    return;
3052
3053out_teardown:
3054    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3055    vfio_teardown_msi(vdev);
3056    vfio_bars_exit(vdev);
3057error:
3058    error_prepend(errp, ERR_PREFIX, vdev->vbasedev.name);
3059}
3060
3061static void vfio_instance_finalize(Object *obj)
3062{
3063    PCIDevice *pci_dev = PCI_DEVICE(obj);
3064    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
3065    VFIOGroup *group = vdev->vbasedev.group;
3066
3067    vfio_display_finalize(vdev);
3068    vfio_bars_finalize(vdev);
3069    g_free(vdev->emulated_config_bits);
3070    g_free(vdev->rom);
3071    /*
3072     * XXX Leaking igd_opregion is not an oversight; we can't remove the
3073     * fw_cfg entry, therefore leaking this allocation seems like the safest
3074     * option.
3075     *
3076     * g_free(vdev->igd_opregion);
3077     */
3078    vfio_put_device(vdev);
3079    vfio_put_group(group);
3080}
3081
3082static void vfio_exitfn(PCIDevice *pdev)
3083{
3084    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
3085
3086    vfio_unregister_req_notifier(vdev);
3087    vfio_unregister_err_notifier(vdev);
3088    pci_device_set_intx_routing_notifier(&vdev->pdev, NULL);
3089    vfio_disable_interrupts(vdev);
3090    if (vdev->intx.mmap_timer) {
3091        timer_free(vdev->intx.mmap_timer);
3092    }
3093    vfio_teardown_msi(vdev);
3094    vfio_bars_exit(vdev);
3095}
3096
3097static void vfio_pci_reset(DeviceState *dev)
3098{
3099    PCIDevice *pdev = DO_UPCAST(PCIDevice, qdev, dev);
3100    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
3101
3102    trace_vfio_pci_reset(vdev->vbasedev.name);
3103
3104    vfio_pci_pre_reset(vdev);
3105
3106    if (vdev->resetfn && !vdev->resetfn(vdev)) {
3107        goto post_reset;
3108    }
3109
3110    if (vdev->vbasedev.reset_works &&
3111        (vdev->has_flr || !vdev->has_pm_reset) &&
3112        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3113        trace_vfio_pci_reset_flr(vdev->vbasedev.name);
3114        goto post_reset;
3115    }
3116
3117    /* See if we can do our own bus reset */
3118    if (!vfio_pci_hot_reset_one(vdev)) {
3119        goto post_reset;
3120    }
3121
3122    /* If nothing else works and the device supports PM reset, use it */
3123    if (vdev->vbasedev.reset_works && vdev->has_pm_reset &&
3124        !ioctl(vdev->vbasedev.fd, VFIO_DEVICE_RESET)) {
3125        trace_vfio_pci_reset_pm(vdev->vbasedev.name);
3126        goto post_reset;
3127    }
3128
3129post_reset:
3130    vfio_pci_post_reset(vdev);
3131}
3132
3133static void vfio_instance_init(Object *obj)
3134{
3135    PCIDevice *pci_dev = PCI_DEVICE(obj);
3136    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pci_dev);
3137
3138    device_add_bootindex_property(obj, &vdev->bootindex,
3139                                  "bootindex", NULL,
3140                                  &pci_dev->qdev, NULL);
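        /*
         * An all-ones host address acts as the "not specified" sentinel,
         * letting realize distinguish a user-supplied "host" address from a
         * device identified only by "sysfsdev".
         */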
3141    vdev->host.domain = ~0U;
3142    vdev->host.bus = ~0U;
3143    vdev->host.slot = ~0U;
3144    vdev->host.function = ~0U;
3145
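        /*
         * 0xFF is the "no clique configured" sentinel; the
         * x-nv-gpudirect-clique property below has no default of its own.
         */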
3146    vdev->nv_gpudirect_clique = 0xFF;
3147
3148    /* QEMU_PCI_CAP_EXPRESS initialization does not depend on the QEMU command
3149     * line, so there is no need to wait until realize as other devices do. */
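        /*
         * Setting the flag this early lets the PCI core size the config
         * space for PCIe; whether express capabilities are actually exposed
         * to the guest is sorted out later during realize.
         */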
3150    pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS;
3151}
3152
3153static Property vfio_pci_dev_properties[] = {
3154    DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host),
3155    DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev),
3156    DEFINE_PROP_ON_OFF_AUTO("display", VFIOPCIDevice,
3157                            display, ON_OFF_AUTO_AUTO),
3158    DEFINE_PROP_UINT32("x-intx-mmap-timeout-ms", VFIOPCIDevice,
3159                       intx.mmap_timeout, 1100),
3160    DEFINE_PROP_BIT("x-vga", VFIOPCIDevice, features,
3161                    VFIO_FEATURE_ENABLE_VGA_BIT, false),
3162    DEFINE_PROP_BIT("x-req", VFIOPCIDevice, features,
3163                    VFIO_FEATURE_ENABLE_REQ_BIT, true),
3164    DEFINE_PROP_BIT("x-igd-opregion", VFIOPCIDevice, features,
3165                    VFIO_FEATURE_ENABLE_IGD_OPREGION_BIT, false),
3166    DEFINE_PROP_BOOL("x-no-mmap", VFIOPCIDevice, vbasedev.no_mmap, false),
3167    DEFINE_PROP_BOOL("x-no-kvm-intx", VFIOPCIDevice, no_kvm_intx, false),
3168    DEFINE_PROP_BOOL("x-no-kvm-msi", VFIOPCIDevice, no_kvm_msi, false),
3169    DEFINE_PROP_BOOL("x-no-kvm-msix", VFIOPCIDevice, no_kvm_msix, false),
3170    DEFINE_PROP_BOOL("x-no-geforce-quirks", VFIOPCIDevice,
3171                     no_geforce_quirks, false),
3172    DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID),
3173    DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, device_id, PCI_ANY_ID),
3174    DEFINE_PROP_UINT32("x-pci-sub-vendor-id", VFIOPCIDevice,
3175                       sub_vendor_id, PCI_ANY_ID),
3176    DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice,
3177                       sub_device_id, PCI_ANY_ID),
3178    DEFINE_PROP_UINT32("x-igd-gms", VFIOPCIDevice, igd_gms, 0),
3179    DEFINE_PROP_UNSIGNED_NODEFAULT("x-nv-gpudirect-clique", VFIOPCIDevice,
3180                                   nv_gpudirect_clique,
3181                                   qdev_prop_nv_gpudirect_clique, uint8_t),
3182    DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
3183                                OFF_AUTOPCIBAR_OFF),
3184    /*
3185     * TODO - support passed fds... is this necessary?
3186     * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
3187     * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
3188     */
3189    DEFINE_PROP_END_OF_LIST(),
3190};
3191
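    /*
     * The state of an assigned device lives in the physical device itself
     * and cannot be saved or restored by QEMU, so the VMSD is marked
     * unmigratable; migration fails while a vfio-pci device is attached.
     */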
3192static const VMStateDescription vfio_pci_vmstate = {
3193    .name = "vfio-pci",
3194    .unmigratable = 1,
3195};
3196
3197static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
3198{
3199    DeviceClass *dc = DEVICE_CLASS(klass);
3200    PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass);
3201
3202    dc->reset = vfio_pci_reset;
3203    dc->props = vfio_pci_dev_properties;
3204    dc->vmsd = &vfio_pci_vmstate;
3205    dc->desc = "VFIO-based PCI device assignment";
3206    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
3207    pdc->realize = vfio_realize;
3208    pdc->exit = vfio_exitfn;
3209    pdc->config_read = vfio_pci_read_config;
3210    pdc->config_write = vfio_pci_write_config;
3211}
3212
3213static const TypeInfo vfio_pci_dev_info = {
3214    .name = "vfio-pci",
3215    .parent = TYPE_PCI_DEVICE,
3216    .instance_size = sizeof(VFIOPCIDevice),
3217    .class_init = vfio_pci_dev_class_init,
3218    .instance_init = vfio_instance_init,
3219    .instance_finalize = vfio_instance_finalize,
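        /* The device can sit on either a PCI Express or a conventional PCI bus. */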
3220    .interfaces = (InterfaceInfo[]) {
3221        { INTERFACE_PCIE_DEVICE },
3222        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
3223        { }
3224    },
3225};
3226
3227static void register_vfio_pci_dev_type(void)
3228{
3229    type_register_static(&vfio_pci_dev_info);
3230}
3231
3232type_init(register_vfio_pci_dev_type)
3233