qemu/hw/pci/msix.c
/*
 * MSI-X device support
 *
 * This module includes support for MSI-X in PCI devices.
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 *  Copyright (c) 2009, Red Hat Inc, Michael S. Tsirkin (mst@redhat.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/pci/pci.h"
#include "hw/xen/xen.h"
#include "sysemu/xen.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "qemu/range.h"
#include "qapi/error.h"
#include "trace.h"

/* The MSI-X enable bit and maskall bit are in byte 1 of the FLAGS register */
#define MSIX_CONTROL_OFFSET (PCI_MSIX_FLAGS + 1)
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
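
/*
 * Worked example: in the 16-bit Message Control ("FLAGS") word,
 * PCI_MSIX_FLAGS_ENABLE is 0x8000 and PCI_MSIX_FLAGS_MASKALL is 0x4000,
 * so after the >> 8 shift the byte-1 masks above come out to 0x80 and
 * 0x40 respectively.
 */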

MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
    MSIMessage msg;

    msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
    msg.data = pci_get_long(table_entry + PCI_MSIX_ENTRY_DATA);
    return msg;
}

/*
 * Special API for POWER to configure the vectors through
 * a side channel. Should never be used by devices.
 */
void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
    uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;

    pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
    pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
    table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

static uint8_t msix_pending_mask(int vector)
{
    return 1 << (vector % 8);
}

static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
    return dev->msix_pba + vector / 8;
}

static int msix_is_pending(PCIDevice *dev, int vector)
{
    return *msix_pending_byte(dev, vector) & msix_pending_mask(vector);
}

void msix_set_pending(PCIDevice *dev, unsigned int vector)
{
    *msix_pending_byte(dev, vector) |= msix_pending_mask(vector);
}

void msix_clr_pending(PCIDevice *dev, int vector)
{
    *msix_pending_byte(dev, vector) &= ~msix_pending_mask(vector);
}
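
/*
 * Worked example: for vector 10, msix_pending_byte() selects PBA byte
 * 10 / 8 = 1 and msix_pending_mask() selects bit 10 % 8 = 2, i.e. mask
 * 0x04 within that byte.
 */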

static bool msix_vector_masked(PCIDevice *dev, unsigned int vector, bool fmask)
{
    unsigned offset = vector * PCI_MSIX_ENTRY_SIZE;
    uint8_t *data = &dev->msix_table[offset + PCI_MSIX_ENTRY_DATA];
    /* MSIs on Xen can be remapped into pirqs. In those cases, masking
     * and unmasking go through the PV evtchn path. */
    if (xen_enabled() && xen_is_pirq_msi(pci_get_long(data))) {
        return false;
    }
    return fmask || dev->msix_table[offset + PCI_MSIX_ENTRY_VECTOR_CTRL] &
        PCI_MSIX_ENTRY_CTRL_MASKBIT;
}

bool msix_is_masked(PCIDevice *dev, unsigned int vector)
{
    return msix_vector_masked(dev, vector, dev->msix_function_masked);
}

static void msix_fire_vector_notifier(PCIDevice *dev,
                                      unsigned int vector, bool is_masked)
{
    MSIMessage msg;
    int ret;

    if (!dev->msix_vector_use_notifier) {
        return;
    }
    if (is_masked) {
        dev->msix_vector_release_notifier(dev, vector);
    } else {
        msg = msix_get_message(dev, vector);
        ret = dev->msix_vector_use_notifier(dev, vector, msg);
        assert(ret >= 0);
    }
}

static void msix_handle_mask_update(PCIDevice *dev, int vector, bool was_masked)
{
    bool is_masked = msix_is_masked(dev, vector);

    if (is_masked == was_masked) {
        return;
    }

    msix_fire_vector_notifier(dev, vector, is_masked);

    if (!is_masked && msix_is_pending(dev, vector)) {
        msix_clr_pending(dev, vector);
        msix_notify(dev, vector);
    }
}

static bool msix_masked(PCIDevice *dev)
{
    return dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] & MSIX_MASKALL_MASK;
}

static void msix_update_function_masked(PCIDevice *dev)
{
    dev->msix_function_masked = !msix_enabled(dev) || msix_masked(dev);
}

/* Handle MSI-X capability config write. */
void msix_write_config(PCIDevice *dev, uint32_t addr,
                       uint32_t val, int len)
{
    unsigned enable_pos = dev->msix_cap + MSIX_CONTROL_OFFSET;
    int vector;
    bool was_masked;

    if (!msix_present(dev) || !range_covers_byte(addr, len, enable_pos)) {
        return;
    }

    trace_msix_write_config(dev->name, msix_enabled(dev), msix_masked(dev));

    was_masked = dev->msix_function_masked;
    msix_update_function_masked(dev);

    if (!msix_enabled(dev)) {
        return;
    }

    pci_device_deassert_intx(dev);

    if (dev->msix_function_masked == was_masked) {
        return;
    }

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_handle_mask_update(dev, vector,
                                msix_vector_masked(dev, vector, was_masked));
    }
}

static uint64_t msix_table_mmio_read(void *opaque, hwaddr addr,
                                     unsigned size)
{
    PCIDevice *dev = opaque;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    return pci_get_long(dev->msix_table + addr);
}

static void msix_table_mmio_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    int vector = addr / PCI_MSIX_ENTRY_SIZE;
    bool was_masked;

    assert(addr + size <= dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);

    was_masked = msix_is_masked(dev, vector);
    pci_set_long(dev->msix_table + addr, val);
    msix_handle_mask_update(dev, vector, was_masked);
}

static const MemoryRegionOps msix_table_mmio_ops = {
    .read = msix_table_mmio_read,
    .write = msix_table_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static uint64_t msix_pba_mmio_read(void *opaque, hwaddr addr,
                                   unsigned size)
{
    PCIDevice *dev = opaque;
    if (dev->msix_vector_poll_notifier) {
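        /* each PBA byte covers 8 vectors; clamp to the vector count */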
        unsigned vector_start = addr * 8;
        unsigned vector_end = MIN((addr + size) * 8, dev->msix_entries_nr);
        dev->msix_vector_poll_notifier(dev, vector_start, vector_end);
    }

    return pci_get_long(dev->msix_pba + addr);
}

static void msix_pba_mmio_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
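    /* The PBA is read-only for the guest; writes are silently ignored. */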
}

static const MemoryRegionOps msix_pba_mmio_ops = {
    .read = msix_pba_mmio_read,
    .write = msix_pba_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .max_access_size = 4,
    },
};

static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
    int vector;

    for (vector = 0; vector < nentries; ++vector) {
        unsigned offset =
            vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
        bool was_masked = msix_is_masked(dev, vector);

        dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
        msix_handle_mask_update(dev, vector, was_masked);
    }
}

/*
 * Make PCI device @dev MSI-X capable
 * @nentries is the max number of MSI-X vectors that the device supports.
 * @table_bar is the MemoryRegion in which the MSI-X table structure resides.
 * @table_bar_nr is the number of the base address register corresponding to
 * @table_bar.
 * @table_offset indicates the offset at which the MSI-X table structure
 * starts in @table_bar.
 * @pba_bar is the MemoryRegion in which the Pending Bit Array structure
 * resides.
 * @pba_bar_nr is the number of the base address register corresponding to
 * @pba_bar.
 * @pba_offset indicates the offset at which the Pending Bit Array structure
 * starts in @pba_bar.
 * A non-zero @cap_pos puts the MSI-X capability at that offset in PCI config
 * space.
 * @errp is for returning errors.
 *
 * Return 0 on success; set @errp and return -errno on error:
 * -ENOTSUP means the platform's interrupt controller lacks MSI support.
 * -EINVAL means a capability overlap (possible when @cap_pos is non-zero);
 * this normally indicates a programming error, except for device assignment,
 * which can use it to check whether real hardware is broken.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos,
              Error **errp)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by the interrupt controller */
    if (!msi_nonbroken) {
        error_setg(errp, "MSI-X is not supported by interrupt controller");
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        error_setg(errp, "The number of MSI-X vectors is invalid");
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        error_setg(errp, "table & pba overlap, or they don't fit in BARs,"
                   " or don't align");
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX,
                             cap_pos, MSIX_CAP_LENGTH, errp);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make the flags bits writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
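
/*
 * A minimal usage sketch (hypothetical device code, not part of this file):
 * a device keeping the table and PBA in one dedicated BAR might place the
 * table at offset 0 and the PBA at offset 0x800.  The field s->msix_bar and
 * the BAR number are assumptions; error handling is elided.
 *
 *     memory_region_init(&s->msix_bar, OBJECT(dev), "mydev-msix", 0x1000);
 *     if (msix_init(dev, 8,                      // 8 vectors
 *                   &s->msix_bar, 1, 0,          // table in BAR 1 at 0x0
 *                   &s->msix_bar, 1, 0x800,      // PBA in BAR 1 at 0x800
 *                   0, errp) < 0) {              // cap_pos 0: auto-placed
 *         return;
 *     }
 *     pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix_bar);
 */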

int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
                            uint8_t bar_nr, Error **errp)
{
    int ret;
    char *name;
    uint32_t bar_size = 4096;
    uint32_t bar_pba_offset = bar_size / 2;
    uint32_t bar_pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /*
     * Migration compatibility dictates that this remains a 4k
     * BAR with the vector table in the lower half and the PBA in
     * the upper half for nentries less than or equal to 128.
     * No need to care about using more than 65 entries for legacy
     * machine types, which have at most 64 queues.
     */
    if (nentries * PCI_MSIX_ENTRY_SIZE > bar_pba_offset) {
        bar_pba_offset = nentries * PCI_MSIX_ENTRY_SIZE;
    }

    if (bar_pba_offset + bar_pba_size > 4096) {
        bar_size = bar_pba_offset + bar_pba_size;
    }

    bar_size = pow2ceil(bar_size);

    name = g_strdup_printf("%s-msix", dev->name);
    memory_region_init(&dev->msix_exclusive_bar, OBJECT(dev), name, bar_size);
    g_free(name);

    ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
                    0, &dev->msix_exclusive_bar,
                    bar_nr, bar_pba_offset,
                    0, errp);
    if (ret) {
        return ret;
    }

    pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &dev->msix_exclusive_bar);

    return 0;
}
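
/*
 * For the common case, the exclusive-BAR helper above is all a device
 * needs; a sketch (hypothetical device code):
 *
 *     if (msix_init_exclusive_bar(dev, 8, 1, errp) < 0) {
 *         return;
 *     }
 *
 * It sizes the BAR itself and registers it with pci_register_bar(), so
 * the device must not register that BAR again.
 */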

static void msix_free_irq_entries(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        dev->msix_entry_used[vector] = 0;
        msix_clr_pending(dev, vector);
    }
}

static void msix_clear_all_vectors(PCIDevice *dev)
{
    int vector;

    for (vector = 0; vector < dev->msix_entries_nr; ++vector) {
        msix_clr_pending(dev, vector);
    }
}

/* Clean up resources for the device. */
void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
    if (!msix_present(dev)) {
        return;
    }
    pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
    dev->msix_cap = 0;
    msix_free_irq_entries(dev);
    dev->msix_entries_nr = 0;
    memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
    g_free(dev->msix_pba);
    dev->msix_pba = NULL;
    memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
    g_free(dev->msix_table);
    dev->msix_table = NULL;
    g_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
}

void msix_uninit_exclusive_bar(PCIDevice *dev)
{
    if (msix_present(dev)) {
        msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
    }
}

void msix_save(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;

    if (!msix_present(dev)) {
        return;
    }

    qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_put_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
}

/* Should be called after restoring the config space. */
void msix_load(PCIDevice *dev, QEMUFile *f)
{
    unsigned n = dev->msix_entries_nr;
    unsigned int vector;

    if (!msix_present(dev)) {
        return;
    }

    msix_clear_all_vectors(dev);
    qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
    qemu_get_buffer(f, dev->msix_pba, DIV_ROUND_UP(n, 8));
    msix_update_function_masked(dev);

    for (vector = 0; vector < n; vector++) {
        msix_handle_mask_update(dev, vector, true);
    }
}

/* Does the device support MSI-X? */
int msix_present(PCIDevice *dev)
{
    return dev->cap_present & QEMU_PCI_CAP_MSIX;
}

/* Is MSI-X enabled? */
int msix_enabled(PCIDevice *dev)
{
    return (dev->cap_present & QEMU_PCI_CAP_MSIX) &&
        (dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
         MSIX_ENABLE_MASK);
}

/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
    MSIMessage msg;

    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }

    if (msix_is_masked(dev, vector)) {
        msix_set_pending(dev, vector);
        return;
    }

    msg = msix_get_message(dev, vector);

    msi_send_message(dev, msg);
}

void msix_reset(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_clear_all_vectors(dev);
    dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
            ~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
    memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
    memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
    msix_mask_all(dev, dev->msix_entries_nr);
}

/* The PCI spec suggests that devices make it possible for software to
 * configure fewer vectors than the device supports, but does not specify
 * a standard mechanism for devices to do so.
 *
 * We support this by asking devices to declare the vectors software is
 * going to actually use, and checking this on the notification path.
 * Devices that don't want to follow the spec suggestion can declare all
 * vectors as used. */
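
/*
 * A sketch of the resulting device-side pattern (hypothetical code; the
 * loop variables and vector count are assumptions):
 *
 *     // at realize time, declare the vectors the device will drive
 *     for (i = 0; i < nvectors; i++) {
 *         msix_vector_use(dev, i);
 *     }
 *
 *     // on a device event; sets the PBA bit instead if the vector
 *     // is masked
 *     msix_notify(dev, vector);
 *
 *     // at unrealize/reset time
 *     msix_unuse_all_vectors(dev);
 */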

/* Mark vector as used. */
int msix_vector_use(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return -EINVAL;
    }

    dev->msix_entry_used[vector]++;
    return 0;
}

/* Mark vector as unused. */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr || !dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector]) {
        return;
    }
    msix_clr_pending(dev, vector);
}

void msix_unuse_all_vectors(PCIDevice *dev)
{
    if (!msix_present(dev)) {
        return;
    }
    msix_free_irq_entries(dev);
}

unsigned int msix_nr_vectors_allocated(const PCIDevice *dev)
{
    return dev->msix_entries_nr;
}

static int msix_set_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    MSIMessage msg;

    if (msix_is_masked(dev, vector)) {
        return 0;
    }
    msg = msix_get_message(dev, vector);
    return dev->msix_vector_use_notifier(dev, vector, msg);
}

static void msix_unset_notifier_for_vector(PCIDevice *dev, unsigned int vector)
{
    if (msix_is_masked(dev, vector)) {
        return;
    }
    dev->msix_vector_release_notifier(dev, vector);
}

int msix_set_vector_notifiers(PCIDevice *dev,
                              MSIVectorUseNotifier use_notifier,
                              MSIVectorReleaseNotifier release_notifier,
                              MSIVectorPollNotifier poll_notifier)
{
    int vector, ret;

    assert(use_notifier && release_notifier);

    dev->msix_vector_use_notifier = use_notifier;
    dev->msix_vector_release_notifier = release_notifier;
    dev->msix_vector_poll_notifier = poll_notifier;

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            ret = msix_set_notifier_for_vector(dev, vector);
            if (ret < 0) {
                goto undo;
            }
        }
    }
    if (dev->msix_vector_poll_notifier) {
        dev->msix_vector_poll_notifier(dev, 0, dev->msix_entries_nr);
    }
    return 0;

undo:
    while (--vector >= 0) {
        msix_unset_notifier_for_vector(dev, vector);
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    return ret;
}
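
/*
 * A hedged sketch of a caller (the callback names and bodies are
 * hypothetical; users such as VFIO or vhost typically route the vector
 * to a KVM irqfd here):
 *
 *     static int my_vector_use(PCIDevice *dev, unsigned vector,
 *                              MSIMessage msg)
 *     {
 *         // install a guest IRQ route for msg and attach an eventfd
 *         return 0;
 *     }
 *
 *     static void my_vector_release(PCIDevice *dev, unsigned vector)
 *     {
 *         // tear the route back down
 *     }
 *
 *     msix_set_vector_notifiers(dev, my_vector_use, my_vector_release,
 *                               NULL);   // poll notifier is optional
 */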

void msix_unset_vector_notifiers(PCIDevice *dev)
{
    int vector;

    assert(dev->msix_vector_use_notifier &&
           dev->msix_vector_release_notifier);

    if ((dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &
        (MSIX_ENABLE_MASK | MSIX_MASKALL_MASK)) == MSIX_ENABLE_MASK) {
        for (vector = 0; vector < dev->msix_entries_nr; vector++) {
            msix_unset_notifier_for_vector(dev, vector);
        }
    }
    dev->msix_vector_use_notifier = NULL;
    dev->msix_vector_release_notifier = NULL;
    dev->msix_vector_poll_notifier = NULL;
}

static int put_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    msix_save(pv, f);

    return 0;
}

static int get_msix_state(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    msix_load(pv, f);
    return 0;
}

static VMStateInfo vmstate_info_msix = {
    .name = "msix state",
    .get  = get_msix_state,
    .put  = put_msix_state,
};

const VMStateDescription vmstate_msix = {
    .name = "msix",
    .fields = (VMStateField[]) {
        {
            .name         = "msix",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_msix,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    }
};
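
/*
 * Devices normally pull this in through the VMSTATE_MSIX() helper; a
 * hedged sketch of a device VMStateDescription (the device name, field,
 * and state type are hypothetical):
 *
 *     static const VMStateDescription vmstate_mydev = {
 *         .name = "mydev",
 *         .version_id = 1,
 *         .fields = (VMStateField[]) {
 *             VMSTATE_PCI_DEVICE(parent_obj, MyDevState),
 *             VMSTATE_MSIX(parent_obj, MyDevState),
 *             VMSTATE_END_OF_LIST()
 *         }
 *     };
 */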