qemu/hw/pci/msix.c
/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 *  Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

/* The MSI-X enable bit and maskall bit are in byte 1 of the FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
        unsigned vector_start = addr * 8;
        /* each PBA byte covers 8 vectors */
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable.
 * @nentries is the maximum number of MSI-X vectors the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table resides.
 * @table_bar_nr is the number of the base address register corresponding
 * to @table_bar.
 * @table_offset is the offset at which the MSI-X table starts in @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array resides.
 * @pba_bar_nr is the number of the base address register corresponding
 * to @pba_bar.
 * @pba_offset is the offset at which the Pending Bit Array starts
 * in @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI
 * config space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means the platform's interrupt controller lacks MSI support.
 * -EINVAL means the capability overlaps an existing one (possible when
 * @cap_pos is non-zero) or the table/PBA layout is invalid. This normally
 * indicates a programming error, except for device assignment, which can
 * use it to detect broken real hardware.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
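
/*
 * Illustrative usage sketch, not part of the original file: a device
 * realize callback might wire MSI-X up roughly like this, with the
 * table and PBA sharing one sufficiently large memory BAR. The vector
 * count, BAR number and offsets are hypothetical.
 */
static int example_msix_setup(PCIDevice *pdev, MemoryRegion *bar,
                              Error **errp)
{
    /* 8 vectors: table at offset 0, PBA right after it, both in BAR 1 */
    return msix_init(pdev, 8, bar, 1, 0,
                     bar, 1, 8 * PCI_MSIX_ENTRY_SIZE,
                     0 /* cap_pos 0: let PCI core pick the offset */, errp);
}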

int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and the PBA in
     * the upper half for nentries less than or equal to 128.
     * No need to worry about more than 65 entries for legacy
     * machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
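
/*
 * Illustrative usage sketch, hypothetical names: most devices use the
 * exclusive-BAR wrapper above and undo it with
 * msix_uninit_exclusive_bar() on unrealize.
 */
static int example_msix_setup_exclusive(PCIDevice *pdev, Error **errp)
{
    return msix_init_exclusive_bar(pdev, 8 /* vectors */, 1 /* BAR */, errp);
}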

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}
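
/*
 * Illustrative sketch, hypothetical helper: device emulation raises a
 * vector on some event, typically guarded by msix_enabled(). A masked
 * vector is latched in the PBA by msix_notify() and delivered once it
 * is unmasked.
 */
static void example_raise_irq(PCIDevice *pdev, unsigned vector)
{
    if (msix_enabled(pdev)) {
        msix_notify(pdev, vector);
    }
}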

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
            ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify a
 * standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is going
 * to actually use, and checking this on the notification path. Devices that
 * don't want to follow the spec suggestion can declare all vectors as used. */
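
/*
 * Illustrative sketch, hypothetical vector count: a device that drives
 * only the first n vectors declares them once after msix_init(); a
 * device that doesn't track usage can simply mark every vector used.
 */
static void example_declare_vectors(PCIDevice *pdev, unsigned n)
{
    unsigned v;

    for (v = 0; v < n; v++) {
        msix_vector_use(pdev, v);
    }
}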

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }

    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, QJSON *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
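
/*
 * Illustrative sketch: a device typically embeds this descriptor via
 * the VMSTATE_MSIX() helper from "hw/pci/msix.h" in its own
 * VMStateDescription, after the PCI device section itself. The device
 * name and state type below are hypothetical, hence the #if 0.
 */
#if 0
static const VMStateDescription vmstate_example_dev = {
    .name = "example-dev",
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, ExampleDevState),
        VMSTATE_MSIX(parent_obj, ExampleDevState),
        VMSTATE_END_OF_LIST()
    }
};
#endif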