qemu/hw/virtio/virtio.c
   1/*
   2 * Virtio Support
   3 *
   4 * Copyright IBM, Corp. 2007
   5 *
   6 * Authors:
   7 *  Anthony Liguori   <aliguori@us.ibm.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 */
  13
  14#include "qemu/osdep.h"
  15#include "qapi/error.h"
  16#include "cpu.h"
  17#include "trace.h"
  18#include "qemu/error-report.h"
  19#include "qemu/log.h"
  20#include "qemu/main-loop.h"
  21#include "qemu/module.h"
  22#include "hw/virtio/virtio.h"
  23#include "migration/qemu-file-types.h"
  24#include "qemu/atomic.h"
  25#include "hw/virtio/virtio-bus.h"
  26#include "hw/qdev-properties.h"
  27#include "hw/virtio/virtio-access.h"
  28#include "sysemu/dma.h"
  29#include "sysemu/runstate.h"
  30#include "standard-headers/linux/virtio_ids.h"
  31
  32/*
  33 * The alignment to use between consumer and producer parts of vring.
  34 * x86 pagesize again. This is the default, used by transports like PCI
  35 * which don't provide a means for the guest to tell the host the alignment.
  36 */
  37#define VIRTIO_PCI_VRING_ALIGN         4096
  38
  39typedef struct VRingDesc
  40{
  41    uint64_t addr;
  42    uint32_t len;
  43    uint16_t flags;
  44    uint16_t next;
  45} VRingDesc;
  46
  47typedef struct VRingPackedDesc {
  48    uint64_t addr;
  49    uint32_t len;
  50    uint16_t id;
  51    uint16_t flags;
  52} VRingPackedDesc;
  53
  54typedef struct VRingAvail
  55{
  56    uint16_t flags;
  57    uint16_t idx;
  58    uint16_t ring[];
  59} VRingAvail;
  60
  61typedef struct VRingUsedElem
  62{
  63    uint32_t id;
  64    uint32_t len;
  65} VRingUsedElem;
  66
  67typedef struct VRingUsed
  68{
  69    uint16_t flags;
  70    uint16_t idx;
  71    VRingUsedElem ring[];
  72} VRingUsed;
  73
  74typedef struct VRingMemoryRegionCaches {
  75    struct rcu_head rcu;
  76    MemoryRegionCache desc;
  77    MemoryRegionCache avail;
  78    MemoryRegionCache used;
  79} VRingMemoryRegionCaches;
  80
  81typedef struct VRing
  82{
  83    unsigned int num;
  84    unsigned int num_default;
  85    unsigned int align;
  86    hwaddr desc;
  87    hwaddr avail;
  88    hwaddr used;
  89    VRingMemoryRegionCaches *caches;
  90} VRing;
  91
  92typedef struct VRingPackedDescEvent {
  93    uint16_t off_wrap;
  94    uint16_t flags;
  95} VRingPackedDescEvent;
  96
  97struct VirtQueue
  98{
  99    VRing vring;
 100    VirtQueueElement *used_elems;
 101
 102    /* Next head to pop */
 103    uint16_t last_avail_idx;
 104    bool last_avail_wrap_counter;
 105
 106    /* Last avail_idx read from VQ. */
 107    uint16_t shadow_avail_idx;
 108    bool shadow_avail_wrap_counter;
 109
 110    uint16_t used_idx;
 111    bool used_wrap_counter;
 112
 113    /* Last used index value we have signalled on */
 114    uint16_t signalled_used;
 115
 116    /* Whether signalled_used is valid */
 117    bool signalled_used_valid;
 118
 119    /* Notification enabled? */
 120    bool notification;
 121
 122    uint16_t queue_index;
 123
 124    unsigned int inuse;
 125
 126    uint16_t vector;
 127    VirtIOHandleOutput handle_output;
 128    VirtIOHandleAIOOutput handle_aio_output;
 129    VirtIODevice *vdev;
 130    EventNotifier guest_notifier;
 131    EventNotifier host_notifier;
 132    bool host_notifier_enabled;
 133    QLIST_ENTRY(VirtQueue) node;
 134};
 135
 136static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
 137{
 138    if (!caches) {
 139        return;
 140    }
 141
 142    address_space_cache_destroy(&caches->desc);
 143    address_space_cache_destroy(&caches->avail);
 144    address_space_cache_destroy(&caches->used);
 145    g_free(caches);
 146}
 147
 148static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
 149{
 150    VRingMemoryRegionCaches *caches;
 151
 152    caches = qatomic_read(&vq->vring.caches);
 153    qatomic_rcu_set(&vq->vring.caches, NULL);
 154    if (caches) {
 155        call_rcu(caches, virtio_free_region_cache, rcu);
 156    }
 157}
 158
 159static void virtio_init_region_cache(VirtIODevice *vdev, int n)
 160{
 161    VirtQueue *vq = &vdev->vq[n];
 162    VRingMemoryRegionCaches *old = vq->vring.caches;
 163    VRingMemoryRegionCaches *new = NULL;
 164    hwaddr addr, size;
 165    int64_t len;
 166    bool packed;
 167
 168
 169    addr = vq->vring.desc;
 170    if (!addr) {
 171        goto out_no_cache;
 172    }
 173    new = g_new0(VRingMemoryRegionCaches, 1);
 174    size = virtio_queue_get_desc_size(vdev, n);
 175    packed = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
 176                                   true : false;
 177    len = address_space_cache_init(&new->desc, vdev->dma_as,
 178                                   addr, size, packed);
 179    if (len < size) {
 180        virtio_error(vdev, "Cannot map desc");
 181        goto err_desc;
 182    }
 183
 184    size = virtio_queue_get_used_size(vdev, n);
 185    len = address_space_cache_init(&new->used, vdev->dma_as,
 186                                   vq->vring.used, size, true);
 187    if (len < size) {
 188        virtio_error(vdev, "Cannot map used");
 189        goto err_used;
 190    }
 191
 192    size = virtio_queue_get_avail_size(vdev, n);
 193    len = address_space_cache_init(&new->avail, vdev->dma_as,
 194                                   vq->vring.avail, size, false);
 195    if (len < size) {
 196        virtio_error(vdev, "Cannot map avail");
 197        goto err_avail;
 198    }
 199
 200    qatomic_rcu_set(&vq->vring.caches, new);
 201    if (old) {
 202        call_rcu(old, virtio_free_region_cache, rcu);
 203    }
 204    return;
 205
 206err_avail:
 207    address_space_cache_destroy(&new->avail);
 208err_used:
 209    address_space_cache_destroy(&new->used);
 210err_desc:
 211    address_space_cache_destroy(&new->desc);
 212out_no_cache:
 213    g_free(new);
 214    virtio_virtqueue_reset_region_cache(vq);
 215}
 216
 217/* virt queue functions */
 218void virtio_queue_update_rings(VirtIODevice *vdev, int n)
 219{
 220    VRing *vring = &vdev->vq[n].vring;
 221
 222    if (!vring->num || !vring->desc || !vring->align) {
 223        /* not yet setup -> nothing to do */
 224        return;
 225    }
 226    vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
 227    vring->used = vring_align(vring->avail +
 228                              offsetof(VRingAvail, ring[vring->num]),
 229                              vring->align);
 230    virtio_init_region_cache(vdev, n);
 231}
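
    /*
     * For example, with a 256-entry queue and the 4096-byte alignment from
     * VIRTIO_PCI_VRING_ALIGN above, the layout computed here works out to
     *
     *     avail = desc + 256 * sizeof(VRingDesc)            = desc + 4096
     *     used  = vring_align(avail + 4 + 2 * 256, 4096)    = desc + 8192
     *
     * (assuming desc itself is 4096-aligned), i.e. the used ring starts on
     * the next page boundary after the avail ring.
     */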
 232
 233/* Called within rcu_read_lock().  */
 234static void vring_split_desc_read(VirtIODevice *vdev, VRingDesc *desc,
 235                                  MemoryRegionCache *cache, int i)
 236{
 237    address_space_read_cached(cache, i * sizeof(VRingDesc),
 238                              desc, sizeof(VRingDesc));
 239    virtio_tswap64s(vdev, &desc->addr);
 240    virtio_tswap32s(vdev, &desc->len);
 241    virtio_tswap16s(vdev, &desc->flags);
 242    virtio_tswap16s(vdev, &desc->next);
 243}
 244
 245static void vring_packed_event_read(VirtIODevice *vdev,
 246                                    MemoryRegionCache *cache,
 247                                    VRingPackedDescEvent *e)
 248{
 249    hwaddr off_off = offsetof(VRingPackedDescEvent, off_wrap);
 250    hwaddr off_flags = offsetof(VRingPackedDescEvent, flags);
 251
 252    e->flags = virtio_lduw_phys_cached(vdev, cache, off_flags);
 253    /* Make sure flags is seen before off_wrap */
 254    smp_rmb();
 255    e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off);
 257}
 258
 259static void vring_packed_off_wrap_write(VirtIODevice *vdev,
 260                                        MemoryRegionCache *cache,
 261                                        uint16_t off_wrap)
 262{
 263    hwaddr off = offsetof(VRingPackedDescEvent, off_wrap);
 264
 265    virtio_stw_phys_cached(vdev, cache, off, off_wrap);
 266    address_space_cache_invalidate(cache, off, sizeof(off_wrap));
 267}
 268
 269static void vring_packed_flags_write(VirtIODevice *vdev,
 270                                     MemoryRegionCache *cache, uint16_t flags)
 271{
 272    hwaddr off = offsetof(VRingPackedDescEvent, flags);
 273
 274    virtio_stw_phys_cached(vdev, cache, off, flags);
 275    address_space_cache_invalidate(cache, off, sizeof(flags));
 276}
 277
 278/* Called within rcu_read_lock().  */
 279static VRingMemoryRegionCaches *vring_get_region_caches(struct VirtQueue *vq)
 280{
 281    return qatomic_rcu_read(&vq->vring.caches);
 282}
 283
 284/* Called within rcu_read_lock().  */
 285static inline uint16_t vring_avail_flags(VirtQueue *vq)
 286{
 287    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 288    hwaddr pa = offsetof(VRingAvail, flags);
 289
 290    if (!caches) {
 291        return 0;
 292    }
 293
 294    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 295}
 296
 297/* Called within rcu_read_lock().  */
 298static inline uint16_t vring_avail_idx(VirtQueue *vq)
 299{
 300    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 301    hwaddr pa = offsetof(VRingAvail, idx);
 302
 303    if (!caches) {
 304        return 0;
 305    }
 306
 307    vq->shadow_avail_idx = virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 308    return vq->shadow_avail_idx;
 309}
 310
 311/* Called within rcu_read_lock().  */
 312static inline uint16_t vring_avail_ring(VirtQueue *vq, int i)
 313{
 314    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 315    hwaddr pa = offsetof(VRingAvail, ring[i]);
 316
 317    if (!caches) {
 318        return 0;
 319    }
 320
 321    return virtio_lduw_phys_cached(vq->vdev, &caches->avail, pa);
 322}
 323
 324/* Called within rcu_read_lock().  */
 325static inline uint16_t vring_get_used_event(VirtQueue *vq)
 326{
 327    return vring_avail_ring(vq, vq->vring.num);
 328}
 329
 330/* Called within rcu_read_lock().  */
 331static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
 332                                    int i)
 333{
 334    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 335    hwaddr pa = offsetof(VRingUsed, ring[i]);
 336
 337    if (!caches) {
 338        return;
 339    }
 340
 341    virtio_tswap32s(vq->vdev, &uelem->id);
 342    virtio_tswap32s(vq->vdev, &uelem->len);
 343    address_space_write_cached(&caches->used, pa, uelem, sizeof(VRingUsedElem));
 344    address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
 345}
 346
 347/* Called within rcu_read_lock().  */
 348static uint16_t vring_used_idx(VirtQueue *vq)
 349{
 350    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 351    hwaddr pa = offsetof(VRingUsed, idx);
 352
 353    if (!caches) {
 354        return 0;
 355    }
 356
 357    return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 358}
 359
 360/* Called within rcu_read_lock().  */
 361static inline void vring_used_idx_set(VirtQueue *vq, uint16_t val)
 362{
 363    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 364    hwaddr pa = offsetof(VRingUsed, idx);
 365
 366    if (caches) {
 367        virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
 368        address_space_cache_invalidate(&caches->used, pa, sizeof(val));
 369    }
 370
 371    vq->used_idx = val;
 372}
 373
 374/* Called within rcu_read_lock().  */
 375static inline void vring_used_flags_set_bit(VirtQueue *vq, int mask)
 376{
 377    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 378    VirtIODevice *vdev = vq->vdev;
 379    hwaddr pa = offsetof(VRingUsed, flags);
 380    uint16_t flags;
 381
 382    if (!caches) {
 383        return;
 384    }
 385
 386    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 387    virtio_stw_phys_cached(vdev, &caches->used, pa, flags | mask);
 388    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 389}
 390
 391/* Called within rcu_read_lock().  */
 392static inline void vring_used_flags_unset_bit(VirtQueue *vq, int mask)
 393{
 394    VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
 395    VirtIODevice *vdev = vq->vdev;
 396    hwaddr pa = offsetof(VRingUsed, flags);
 397    uint16_t flags;
 398
 399    if (!caches) {
 400        return;
 401    }
 402
 403    flags = virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
 404    virtio_stw_phys_cached(vdev, &caches->used, pa, flags & ~mask);
 405    address_space_cache_invalidate(&caches->used, pa, sizeof(flags));
 406}
 407
 408/* Called within rcu_read_lock().  */
 409static inline void vring_set_avail_event(VirtQueue *vq, uint16_t val)
 410{
 411    VRingMemoryRegionCaches *caches;
 412    hwaddr pa;
 413    if (!vq->notification) {
 414        return;
 415    }
 416
 417    caches = vring_get_region_caches(vq);
 418    if (!caches) {
 419        return;
 420    }
 421
 422    pa = offsetof(VRingUsed, ring[vq->vring.num]);
 423    virtio_stw_phys_cached(vq->vdev, &caches->used, pa, val);
 424    address_space_cache_invalidate(&caches->used, pa, sizeof(val));
 425}
 426
 427static void virtio_queue_split_set_notification(VirtQueue *vq, int enable)
 428{
 429    RCU_READ_LOCK_GUARD();
 430
 431    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
 432        vring_set_avail_event(vq, vring_avail_idx(vq));
 433    } else if (enable) {
 434        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
 435    } else {
 436        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
 437    }
 438    if (enable) {
 439        /* Expose avail event/used flags before caller checks the avail idx. */
 440        smp_mb();
 441    }
 442}
 443
 444static void virtio_queue_packed_set_notification(VirtQueue *vq, int enable)
 445{
 446    uint16_t off_wrap;
 447    VRingPackedDescEvent e;
 448    VRingMemoryRegionCaches *caches;
 449
 450    RCU_READ_LOCK_GUARD();
 451    caches = vring_get_region_caches(vq);
 452    if (!caches) {
 453        return;
 454    }
 455
 456    vring_packed_event_read(vq->vdev, &caches->used, &e);
 457
 458    if (!enable) {
 459        e.flags = VRING_PACKED_EVENT_FLAG_DISABLE;
 460    } else if (virtio_vdev_has_feature(vq->vdev, VIRTIO_RING_F_EVENT_IDX)) {
 461        off_wrap = vq->shadow_avail_idx | vq->shadow_avail_wrap_counter << 15;
 462        vring_packed_off_wrap_write(vq->vdev, &caches->used, off_wrap);
 463        /* Make sure off_wrap is written before flags */
 464        smp_wmb();
 465        e.flags = VRING_PACKED_EVENT_FLAG_DESC;
 466    } else {
 467        e.flags = VRING_PACKED_EVENT_FLAG_ENABLE;
 468    }
 469
 470    vring_packed_flags_write(vq->vdev, &caches->used, e.flags);
 471    if (enable) {
 472        /* Expose avail event/used flags before caller checks the avail idx. */
 473        smp_mb();
 474    }
 475}
 476
 477bool virtio_queue_get_notification(VirtQueue *vq)
 478{
 479    return vq->notification;
 480}
 481
 482void virtio_queue_set_notification(VirtQueue *vq, int enable)
 483{
 484    vq->notification = enable;
 485
 486    if (!vq->vring.desc) {
 487        return;
 488    }
 489
 490    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 491        virtio_queue_packed_set_notification(vq, enable);
 492    } else {
 493        virtio_queue_split_set_notification(vq, enable);
 494    }
 495}
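
    /*
     * A common device-side pattern (sketch only; process_queue() stands in
     * for device-specific pop/push handling and is not a function in this
     * file): disable notifications while draining the queue, then re-enable
     * and re-check for new buffers.  The smp_mb() issued on enable above is
     * what makes that final emptiness check safe against a concurrently
     * publishing guest.
     *
     *     for (;;) {
     *         virtio_queue_set_notification(vq, 0);
     *         process_queue(vq);
     *         virtio_queue_set_notification(vq, 1);
     *         if (virtio_queue_empty(vq)) {
     *             break;
     *         }
     *     }
     */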
 496
 497int virtio_queue_ready(VirtQueue *vq)
 498{
 499    return vq->vring.avail != 0;
 500}
 501
 502static void vring_packed_desc_read_flags(VirtIODevice *vdev,
 503                                         uint16_t *flags,
 504                                         MemoryRegionCache *cache,
 505                                         int i)
 506{
 507    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
 508
 509    *flags = virtio_lduw_phys_cached(vdev, cache, off);
 510}
 511
 512static void vring_packed_desc_read(VirtIODevice *vdev,
 513                                   VRingPackedDesc *desc,
 514                                   MemoryRegionCache *cache,
 515                                   int i, bool strict_order)
 516{
 517    hwaddr off = i * sizeof(VRingPackedDesc);
 518
 519    vring_packed_desc_read_flags(vdev, &desc->flags, cache, i);
 520
 521    if (strict_order) {
 522        /* Make sure flags are read before the rest of the fields. */
 523        smp_rmb();
 524    }
 525
 526    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, addr),
 527                              &desc->addr, sizeof(desc->addr));
 528    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, id),
 529                              &desc->id, sizeof(desc->id));
 530    address_space_read_cached(cache, off + offsetof(VRingPackedDesc, len),
 531                              &desc->len, sizeof(desc->len));
 532    virtio_tswap64s(vdev, &desc->addr);
 533    virtio_tswap16s(vdev, &desc->id);
 534    virtio_tswap32s(vdev, &desc->len);
 535}
 536
 537static void vring_packed_desc_write_data(VirtIODevice *vdev,
 538                                         VRingPackedDesc *desc,
 539                                         MemoryRegionCache *cache,
 540                                         int i)
 541{
 542    hwaddr off_id = i * sizeof(VRingPackedDesc) +
 543                    offsetof(VRingPackedDesc, id);
 544    hwaddr off_len = i * sizeof(VRingPackedDesc) +
 545                    offsetof(VRingPackedDesc, len);
 546
 547    virtio_tswap32s(vdev, &desc->len);
 548    virtio_tswap16s(vdev, &desc->id);
 549    address_space_write_cached(cache, off_id, &desc->id, sizeof(desc->id));
 550    address_space_cache_invalidate(cache, off_id, sizeof(desc->id));
 551    address_space_write_cached(cache, off_len, &desc->len, sizeof(desc->len));
 552    address_space_cache_invalidate(cache, off_len, sizeof(desc->len));
 553}
 554
 555static void vring_packed_desc_write_flags(VirtIODevice *vdev,
 556                                          VRingPackedDesc *desc,
 557                                          MemoryRegionCache *cache,
 558                                          int i)
 559{
 560    hwaddr off = i * sizeof(VRingPackedDesc) + offsetof(VRingPackedDesc, flags);
 561
 562    virtio_stw_phys_cached(vdev, cache, off, desc->flags);
 563    address_space_cache_invalidate(cache, off, sizeof(desc->flags));
 564}
 565
 566static void vring_packed_desc_write(VirtIODevice *vdev,
 567                                    VRingPackedDesc *desc,
 568                                    MemoryRegionCache *cache,
 569                                    int i, bool strict_order)
 570{
 571    vring_packed_desc_write_data(vdev, desc, cache, i);
 572    if (strict_order) {
 573        /* Make sure data is written before flags. */
 574        smp_wmb();
 575    }
 576    vring_packed_desc_write_flags(vdev, desc, cache, i);
 577}
 578
 579static inline bool is_desc_avail(uint16_t flags, bool wrap_counter)
 580{
 581    bool avail, used;
 582
 583    avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
 584    used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
 585    return (avail != used) && (avail == wrap_counter);
 586}
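
    /*
     * In other words, a packed descriptor is available to the device when its
     * avail and used flag bits differ and the avail bit matches the current
     * wrap counter:
     *
     *     avail  used  wrap_counter   is_desc_avail()
     *       1      0        1             true
     *       0      1        0             true
     *       1      0        0             false
     *       0      1        1             false
     *       equal bits      any           false
     */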
 587
 588/* Fetch avail_idx from VQ memory only when we really need to know if the
 589 * guest has added some buffers.
 590 * Called within rcu_read_lock().  */
 591static int virtio_queue_empty_rcu(VirtQueue *vq)
 592{
 593    if (virtio_device_disabled(vq->vdev)) {
 594        return 1;
 595    }
 596
 597    if (unlikely(!vq->vring.avail)) {
 598        return 1;
 599    }
 600
 601    if (vq->shadow_avail_idx != vq->last_avail_idx) {
 602        return 0;
 603    }
 604
 605    return vring_avail_idx(vq) == vq->last_avail_idx;
 606}
 607
 608static int virtio_queue_split_empty(VirtQueue *vq)
 609{
 610    bool empty;
 611
 612    if (virtio_device_disabled(vq->vdev)) {
 613        return 1;
 614    }
 615
 616    if (unlikely(!vq->vring.avail)) {
 617        return 1;
 618    }
 619
 620    if (vq->shadow_avail_idx != vq->last_avail_idx) {
 621        return 0;
 622    }
 623
 624    RCU_READ_LOCK_GUARD();
 625    empty = vring_avail_idx(vq) == vq->last_avail_idx;
 626    return empty;
 627}
 628
 629static int virtio_queue_packed_empty_rcu(VirtQueue *vq)
 630{
 631    struct VRingPackedDesc desc;
 632    VRingMemoryRegionCaches *cache;
 633
 634    if (unlikely(!vq->vring.desc)) {
 635        return 1;
 636    }
 637
 638    cache = vring_get_region_caches(vq);
 639    if (!cache) {
 640        return 1;
 641    }
 642
 643    vring_packed_desc_read_flags(vq->vdev, &desc.flags, &cache->desc,
 644                                 vq->last_avail_idx);
 645
 646    return !is_desc_avail(desc.flags, vq->last_avail_wrap_counter);
 647}
 648
 649static int virtio_queue_packed_empty(VirtQueue *vq)
 650{
 651    RCU_READ_LOCK_GUARD();
 652    return virtio_queue_packed_empty_rcu(vq);
 653}
 654
 655int virtio_queue_empty(VirtQueue *vq)
 656{
 657    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 658        return virtio_queue_packed_empty(vq);
 659    } else {
 660        return virtio_queue_split_empty(vq);
 661    }
 662}
 663
 664static void virtqueue_unmap_sg(VirtQueue *vq, const VirtQueueElement *elem,
 665                               unsigned int len)
 666{
 667    AddressSpace *dma_as = vq->vdev->dma_as;
 668    unsigned int offset;
 669    int i;
 670
 671    offset = 0;
 672    for (i = 0; i < elem->in_num; i++) {
 673        size_t size = MIN(len - offset, elem->in_sg[i].iov_len);
 674
 675        dma_memory_unmap(dma_as, elem->in_sg[i].iov_base,
 676                         elem->in_sg[i].iov_len,
 677                         DMA_DIRECTION_FROM_DEVICE, size);
 678
 679        offset += size;
 680    }
 681
 682    for (i = 0; i < elem->out_num; i++) {
 683        dma_memory_unmap(dma_as, elem->out_sg[i].iov_base,
 684                         elem->out_sg[i].iov_len,
 685                         DMA_DIRECTION_TO_DEVICE,
 686                         elem->out_sg[i].iov_len);
        }
 687}
 688
 689/* virtqueue_detach_element:
 690 * @vq: The #VirtQueue
 691 * @elem: The #VirtQueueElement
 692 * @len: number of bytes written
 693 *
 694 * Detach the element from the virtqueue.  This function is suitable for device
 695 * reset or other situations where a #VirtQueueElement is simply freed and will
 696 * not be pushed or discarded.
 697 */
 698void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
 699                              unsigned int len)
 700{
 701    vq->inuse -= elem->ndescs;
 702    virtqueue_unmap_sg(vq, elem, len);
 703}
 704
 705static void virtqueue_split_rewind(VirtQueue *vq, unsigned int num)
 706{
 707    vq->last_avail_idx -= num;
 708}
 709
 710static void virtqueue_packed_rewind(VirtQueue *vq, unsigned int num)
 711{
 712    if (vq->last_avail_idx < num) {
 713        vq->last_avail_idx = vq->vring.num + vq->last_avail_idx - num;
 714        vq->last_avail_wrap_counter ^= 1;
 715    } else {
 716        vq->last_avail_idx -= num;
 717    }
 718}
 719
 720/* virtqueue_unpop:
 721 * @vq: The #VirtQueue
 722 * @elem: The #VirtQueueElement
 723 * @len: number of bytes written
 724 *
 725 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 726 * call to virtqueue_pop() will refetch the element.
 727 */
 728void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
 729                     unsigned int len)
 730{
 731
 732    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 733        virtqueue_packed_rewind(vq, 1);
 734    } else {
 735        virtqueue_split_rewind(vq, 1);
 736    }
 737
 738    virtqueue_detach_element(vq, elem, len);
 739}
 740
 741/* virtqueue_rewind:
 742 * @vq: The #VirtQueue
 743 * @num: Number of elements to push back
 744 *
 745 * Pretend that elements weren't popped from the virtqueue.  The next
 746 * virtqueue_pop() will refetch the oldest element.
 747 *
 748 * Use virtqueue_unpop() instead if you have a VirtQueueElement.
 749 *
 750 * Returns: true on success, false if @num is greater than the number of in use
 751 * elements.
 752 */
 753bool virtqueue_rewind(VirtQueue *vq, unsigned int num)
 754{
 755    if (num > vq->inuse) {
 756        return false;
 757    }
 758
 759    vq->inuse -= num;
 760    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 761        virtqueue_packed_rewind(vq, num);
 762    } else {
 763        virtqueue_split_rewind(vq, num);
 764    }
 765    return true;
 766}
 767
 768static void virtqueue_split_fill(VirtQueue *vq, const VirtQueueElement *elem,
 769                    unsigned int len, unsigned int idx)
 770{
 771    VRingUsedElem uelem;
 772
 773    if (unlikely(!vq->vring.used)) {
 774        return;
 775    }
 776
 777    idx = (idx + vq->used_idx) % vq->vring.num;
 778
 779    uelem.id = elem->index;
 780    uelem.len = len;
 781    vring_used_write(vq, &uelem, idx);
 782}
 783
 784static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem,
 785                                  unsigned int len, unsigned int idx)
 786{
 787    vq->used_elems[idx].index = elem->index;
 788    vq->used_elems[idx].len = len;
 789    vq->used_elems[idx].ndescs = elem->ndescs;
 790}
 791
 792static void virtqueue_packed_fill_desc(VirtQueue *vq,
 793                                       const VirtQueueElement *elem,
 794                                       unsigned int idx,
 795                                       bool strict_order)
 796{
 797    uint16_t head;
 798    VRingMemoryRegionCaches *caches;
 799    VRingPackedDesc desc = {
 800        .id = elem->index,
 801        .len = elem->len,
 802    };
 803    bool wrap_counter = vq->used_wrap_counter;
 804
 805    if (unlikely(!vq->vring.desc)) {
 806        return;
 807    }
 808
 809    head = vq->used_idx + idx;
 810    if (head >= vq->vring.num) {
 811        head -= vq->vring.num;
 812        wrap_counter ^= 1;
 813    }
 814    if (wrap_counter) {
 815        desc.flags |= (1 << VRING_PACKED_DESC_F_AVAIL);
 816        desc.flags |= (1 << VRING_PACKED_DESC_F_USED);
 817    } else {
 818        desc.flags &= ~(1 << VRING_PACKED_DESC_F_AVAIL);
 819        desc.flags &= ~(1 << VRING_PACKED_DESC_F_USED);
 820    }
 821
 822    caches = vring_get_region_caches(vq);
 823    if (!caches) {
 824        return;
 825    }
 826
 827    vring_packed_desc_write(vq->vdev, &desc, &caches->desc, head, strict_order);
 828}
 829
 830/* Called within rcu_read_lock().  */
 831void virtqueue_fill(VirtQueue *vq, const VirtQueueElement *elem,
 832                    unsigned int len, unsigned int idx)
 833{
 834    trace_virtqueue_fill(vq, elem, len, idx);
 835
 836    virtqueue_unmap_sg(vq, elem, len);
 837
 838    if (virtio_device_disabled(vq->vdev)) {
 839        return;
 840    }
 841
 842    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 843        virtqueue_packed_fill(vq, elem, len, idx);
 844    } else {
 845        virtqueue_split_fill(vq, elem, len, idx);
 846    }
 847}
 848
 849/* Called within rcu_read_lock().  */
 850static void virtqueue_split_flush(VirtQueue *vq, unsigned int count)
 851{
 852    uint16_t old, new;
 853
 854    if (unlikely(!vq->vring.used)) {
 855        return;
 856    }
 857
 858    /* Make sure buffer is written before we update index. */
 859    smp_wmb();
 860    trace_virtqueue_flush(vq, count);
 861    old = vq->used_idx;
 862    new = old + count;
 863    vring_used_idx_set(vq, new);
 864    vq->inuse -= count;
 865    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
 866        vq->signalled_used_valid = false;
        }
 867}
 868
 869static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count)
 870{
 871    unsigned int i, ndescs = 0;
 872
 873    if (unlikely(!vq->vring.desc)) {
 874        return;
 875    }
 876
 877    for (i = 1; i < count; i++) {
 878        virtqueue_packed_fill_desc(vq, &vq->used_elems[i], i, false);
 879        ndescs += vq->used_elems[i].ndescs;
 880    }
 881    virtqueue_packed_fill_desc(vq, &vq->used_elems[0], 0, true);
 882    ndescs += vq->used_elems[0].ndescs;
 883
 884    vq->inuse -= ndescs;
 885    vq->used_idx += ndescs;
 886    if (vq->used_idx >= vq->vring.num) {
 887        vq->used_idx -= vq->vring.num;
 888        vq->used_wrap_counter ^= 1;
 889    }
 890}
 891
 892void virtqueue_flush(VirtQueue *vq, unsigned int count)
 893{
 894    if (virtio_device_disabled(vq->vdev)) {
 895        vq->inuse -= count;
 896        return;
 897    }
 898
 899    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
 900        virtqueue_packed_flush(vq, count);
 901    } else {
 902        virtqueue_split_flush(vq, count);
 903    }
 904}
 905
 906void virtqueue_push(VirtQueue *vq, const VirtQueueElement *elem,
 907                    unsigned int len)
 908{
 909    RCU_READ_LOCK_GUARD();
 910    virtqueue_fill(vq, elem, len, 0);
 911    virtqueue_flush(vq, 1);
 912}
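
    /*
     * Typical pop/push lifecycle in a device model (a sketch; the
     * process_request() helper is hypothetical and stands in for code that
     * consumes out_sg, fills in_sg and returns the number of bytes written):
     *
     *     VirtQueueElement *elem;
     *
     *     while ((elem = virtqueue_pop(vq, sizeof(VirtQueueElement)))) {
     *         size_t written = process_request(elem);
     *         virtqueue_push(vq, elem, written);
     *         g_free(elem);
     *     }
     *     virtio_notify(vdev, vq);
     */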
 913
 914/* Called within rcu_read_lock().  */
 915static int virtqueue_num_heads(VirtQueue *vq, unsigned int idx)
 916{
 917    uint16_t num_heads = vring_avail_idx(vq) - idx;
 918
 919    /* Check it isn't doing very strange things with descriptor numbers. */
 920    if (num_heads > vq->vring.num) {
 921        virtio_error(vq->vdev, "Guest moved used index from %u to %u",
 922                     idx, vq->shadow_avail_idx);
 923        return -EINVAL;
 924    }
 925    /* On success, callers read a descriptor at vq->last_avail_idx.
 926     * Make sure descriptor read does not bypass avail index read. */
 927    if (num_heads) {
 928        smp_rmb();
 929    }
 930
 931    return num_heads;
 932}
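
    /*
     * Note that the subtraction above is done in uint16_t, so it still yields
     * the number of new heads after the avail index wraps past 65535,
     * provided the guest adds at most vq->vring.num heads between reads
     * (anything larger is rejected by the range check above).
     */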
 933
 934/* Called within rcu_read_lock().  */
 935static bool virtqueue_get_head(VirtQueue *vq, unsigned int idx,
 936                               unsigned int *head)
 937{
 938    /* Grab the next descriptor number they're advertising, and increment
 939     * the index we've seen. */
 940    *head = vring_avail_ring(vq, idx % vq->vring.num);
 941
 942    /* If their number is silly, that's a fatal mistake. */
 943    if (*head >= vq->vring.num) {
 944        virtio_error(vq->vdev, "Guest says index %u is available", *head);
 945        return false;
 946    }
 947
 948    return true;
 949}
 950
 951enum {
 952    VIRTQUEUE_READ_DESC_ERROR = -1,
 953    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
 954    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
 955};
 956
 957static int virtqueue_split_read_next_desc(VirtIODevice *vdev, VRingDesc *desc,
 958                                          MemoryRegionCache *desc_cache,
 959                                          unsigned int max, unsigned int *next)
 960{
 961    /* If this descriptor says it doesn't chain, we're done. */
 962    if (!(desc->flags & VRING_DESC_F_NEXT)) {
 963        return VIRTQUEUE_READ_DESC_DONE;
 964    }
 965
 966    /* Check they're not leading us off the end of the descriptors. */
 967    *next = desc->next;
 968    /* Make sure compiler knows to grab that: we don't want it changing! */
 969    smp_wmb();
 970
 971    if (*next >= max) {
 972        virtio_error(vdev, "Desc next is %u", *next);
 973        return VIRTQUEUE_READ_DESC_ERROR;
 974    }
 975
 976    vring_split_desc_read(vdev, desc, desc_cache, *next);
 977    return VIRTQUEUE_READ_DESC_MORE;
 978}
 979
 980static void virtqueue_split_get_avail_bytes(VirtQueue *vq,
 981                            unsigned int *in_bytes, unsigned int *out_bytes,
 982                            unsigned max_in_bytes, unsigned max_out_bytes)
 983{
 984    VirtIODevice *vdev = vq->vdev;
 985    unsigned int max, idx;
 986    unsigned int total_bufs, in_total, out_total;
 987    VRingMemoryRegionCaches *caches;
 988    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
 989    int64_t len = 0;
 990    int rc;
 991
 992    RCU_READ_LOCK_GUARD();
 993
 994    idx = vq->last_avail_idx;
 995    total_bufs = in_total = out_total = 0;
 996
 997    max = vq->vring.num;
 998    caches = vring_get_region_caches(vq);
 999    if (!caches) {
1000        goto err;
1001    }
1002
1003    while ((rc = virtqueue_num_heads(vq, idx)) > 0) {
1004        MemoryRegionCache *desc_cache = &caches->desc;
1005        unsigned int num_bufs;
1006        VRingDesc desc;
1007        unsigned int i;
1008
1009        num_bufs = total_bufs;
1010
1011        if (!virtqueue_get_head(vq, idx++, &i)) {
1012            goto err;
1013        }
1014
1015        vring_split_desc_read(vdev, &desc, desc_cache, i);
1016
1017        if (desc.flags & VRING_DESC_F_INDIRECT) {
1018            if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1019                virtio_error(vdev, "Invalid size for indirect buffer table");
1020                goto err;
1021            }
1022
1023            /* If we've got too many, that implies a descriptor loop. */
1024            if (num_bufs >= max) {
1025                virtio_error(vdev, "Looped descriptor");
1026                goto err;
1027            }
1028
1029            /* loop over the indirect descriptor table */
1030            len = address_space_cache_init(&indirect_desc_cache,
1031                                           vdev->dma_as,
1032                                           desc.addr, desc.len, false);
1033            desc_cache = &indirect_desc_cache;
1034            if (len < desc.len) {
1035                virtio_error(vdev, "Cannot map indirect buffer");
1036                goto err;
1037            }
1038
1039            max = desc.len / sizeof(VRingDesc);
1040            num_bufs = i = 0;
1041            vring_split_desc_read(vdev, &desc, desc_cache, i);
1042        }
1043
1044        do {
1045            /* If we've got too many, that implies a descriptor loop. */
1046            if (++num_bufs > max) {
1047                virtio_error(vdev, "Looped descriptor");
1048                goto err;
1049            }
1050
1051            if (desc.flags & VRING_DESC_F_WRITE) {
1052                in_total += desc.len;
1053            } else {
1054                out_total += desc.len;
1055            }
1056            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1057                goto done;
1058            }
1059
1060            rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1061        } while (rc == VIRTQUEUE_READ_DESC_MORE);
1062
1063        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1064            goto err;
1065        }
1066
1067        if (desc_cache == &indirect_desc_cache) {
1068            address_space_cache_destroy(&indirect_desc_cache);
1069            total_bufs++;
1070        } else {
1071            total_bufs = num_bufs;
1072        }
1073    }
1074
1075    if (rc < 0) {
1076        goto err;
1077    }
1078
1079done:
1080    address_space_cache_destroy(&indirect_desc_cache);
1081    if (in_bytes) {
1082        *in_bytes = in_total;
1083    }
1084    if (out_bytes) {
1085        *out_bytes = out_total;
1086    }
1087    return;
1088
1089err:
1090    in_total = out_total = 0;
1091    goto done;
1092}
1093
1094static int virtqueue_packed_read_next_desc(VirtQueue *vq,
1095                                           VRingPackedDesc *desc,
1096                                           MemoryRegionCache
1097                                           *desc_cache,
1098                                           unsigned int max,
1099                                           unsigned int *next,
1100                                           bool indirect)
1101{
1102    /* If this descriptor says it doesn't chain, we're done. */
1103    if (!indirect && !(desc->flags & VRING_DESC_F_NEXT)) {
1104        return VIRTQUEUE_READ_DESC_DONE;
1105    }
1106
1107    ++*next;
1108    if (*next == max) {
1109        if (indirect) {
1110            return VIRTQUEUE_READ_DESC_DONE;
1111        } else {
1112            (*next) -= vq->vring.num;
1113        }
1114    }
1115
1116    vring_packed_desc_read(vq->vdev, desc, desc_cache, *next, false);
1117    return VIRTQUEUE_READ_DESC_MORE;
1118}
1119
1120static void virtqueue_packed_get_avail_bytes(VirtQueue *vq,
1121                                             unsigned int *in_bytes,
1122                                             unsigned int *out_bytes,
1123                                             unsigned max_in_bytes,
1124                                             unsigned max_out_bytes)
1125{
1126    VirtIODevice *vdev = vq->vdev;
1127    unsigned int max, idx;
1128    unsigned int total_bufs, in_total, out_total;
1129    MemoryRegionCache *desc_cache;
1130    VRingMemoryRegionCaches *caches;
1131    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1132    int64_t len = 0;
1133    VRingPackedDesc desc;
1134    bool wrap_counter;
1135
1136    RCU_READ_LOCK_GUARD();
1137    idx = vq->last_avail_idx;
1138    wrap_counter = vq->last_avail_wrap_counter;
1139    total_bufs = in_total = out_total = 0;
1140
1141    max = vq->vring.num;
1142    caches = vring_get_region_caches(vq);
1143    if (!caches) {
1144        goto err;
1145    }
1146
1147    for (;;) {
1148        unsigned int num_bufs = total_bufs;
1149        unsigned int i = idx;
1150        int rc;
1151
1152        desc_cache = &caches->desc;
1153        vring_packed_desc_read(vdev, &desc, desc_cache, idx, true);
1154        if (!is_desc_avail(desc.flags, wrap_counter)) {
1155            break;
1156        }
1157
1158        if (desc.flags & VRING_DESC_F_INDIRECT) {
1159            if (desc.len % sizeof(VRingPackedDesc)) {
1160                virtio_error(vdev, "Invalid size for indirect buffer table");
1161                goto err;
1162            }
1163
1164            /* If we've got too many, that implies a descriptor loop. */
1165            if (num_bufs >= max) {
1166                virtio_error(vdev, "Looped descriptor");
1167                goto err;
1168            }
1169
1170            /* loop over the indirect descriptor table */
1171            len = address_space_cache_init(&indirect_desc_cache,
1172                                           vdev->dma_as,
1173                                           desc.addr, desc.len, false);
1174            desc_cache = &indirect_desc_cache;
1175            if (len < desc.len) {
1176                virtio_error(vdev, "Cannot map indirect buffer");
1177                goto err;
1178            }
1179
1180            max = desc.len / sizeof(VRingPackedDesc);
1181            num_bufs = i = 0;
1182            vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1183        }
1184
1185        do {
1186            /* If we've got too many, that implies a descriptor loop. */
1187            if (++num_bufs > max) {
1188                virtio_error(vdev, "Looped descriptor");
1189                goto err;
1190            }
1191
1192            if (desc.flags & VRING_DESC_F_WRITE) {
1193                in_total += desc.len;
1194            } else {
1195                out_total += desc.len;
1196            }
1197            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
1198                goto done;
1199            }
1200
1201            rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max,
1202                                                 &i, desc_cache ==
1203                                                 &indirect_desc_cache);
1204        } while (rc == VIRTQUEUE_READ_DESC_MORE);
1205
1206        if (desc_cache == &indirect_desc_cache) {
1207            address_space_cache_destroy(&indirect_desc_cache);
1208            total_bufs++;
1209            idx++;
1210        } else {
1211            idx += num_bufs - total_bufs;
1212            total_bufs = num_bufs;
1213        }
1214
1215        if (idx >= vq->vring.num) {
1216            idx -= vq->vring.num;
1217            wrap_counter ^= 1;
1218        }
1219    }
1220
1221    /* Record the index and wrap counter for a kick we want */
1222    vq->shadow_avail_idx = idx;
1223    vq->shadow_avail_wrap_counter = wrap_counter;
1224done:
1225    address_space_cache_destroy(&indirect_desc_cache);
1226    if (in_bytes) {
1227        *in_bytes = in_total;
1228    }
1229    if (out_bytes) {
1230        *out_bytes = out_total;
1231    }
1232    return;
1233
1234err:
1235    in_total = out_total = 0;
1236    goto done;
1237}
1238
1239void virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes,
1240                               unsigned int *out_bytes,
1241                               unsigned max_in_bytes, unsigned max_out_bytes)
1242{
1243    uint16_t desc_size;
1244    VRingMemoryRegionCaches *caches;
1245
1246    if (unlikely(!vq->vring.desc)) {
1247        goto err;
1248    }
1249
1250    caches = vring_get_region_caches(vq);
1251    if (!caches) {
1252        goto err;
1253    }
1254
1255    desc_size = virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED) ?
1256                                sizeof(VRingPackedDesc) : sizeof(VRingDesc);
1257    if (caches->desc.len < vq->vring.num * desc_size) {
1258        virtio_error(vq->vdev, "Cannot map descriptor ring");
1259        goto err;
1260    }
1261
1262    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1263        virtqueue_packed_get_avail_bytes(vq, in_bytes, out_bytes,
1264                                         max_in_bytes, max_out_bytes);
1265    } else {
1266        virtqueue_split_get_avail_bytes(vq, in_bytes, out_bytes,
1267                                        max_in_bytes, max_out_bytes);
1268    }
1269
1270    return;
1271err:
1272    if (in_bytes) {
1273        *in_bytes = 0;
1274    }
1275    if (out_bytes) {
1276        *out_bytes = 0;
1277    }
1278}
1279
1280int virtqueue_avail_bytes(VirtQueue *vq, unsigned int in_bytes,
1281                          unsigned int out_bytes)
1282{
1283    unsigned int in_total, out_total;
1284
1285    virtqueue_get_avail_bytes(vq, &in_total, &out_total, in_bytes, out_bytes);
1286    return in_bytes <= in_total && out_bytes <= out_total;
1287}
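
    /*
     * For example, a device expecting requests of a known size can test for
     * sufficient buffer space before committing to a pop (sketch; need_in and
     * need_out are whatever the device requires):
     *
     *     if (!virtqueue_avail_bytes(vq, need_in, need_out)) {
     *         return;   // wait for the guest to post more buffers
     *     }
     *     elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
     */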
1288
1289static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg,
1290                               hwaddr *addr, struct iovec *iov,
1291                               unsigned int max_num_sg, bool is_write,
1292                               hwaddr pa, size_t sz)
1293{
1294    bool ok = false;
1295    unsigned num_sg = *p_num_sg;
1296    assert(num_sg <= max_num_sg);
1297
1298    if (!sz) {
1299        virtio_error(vdev, "virtio: zero sized buffers are not allowed");
1300        goto out;
1301    }
1302
1303    while (sz) {
1304        hwaddr len = sz;
1305
1306        if (num_sg == max_num_sg) {
1307            virtio_error(vdev, "virtio: too many write descriptors in "
1308                               "indirect table");
1309            goto out;
1310        }
1311
1312        iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len,
1313                                              is_write ?
1314                                              DMA_DIRECTION_FROM_DEVICE :
1315                                              DMA_DIRECTION_TO_DEVICE);
1316        if (!iov[num_sg].iov_base) {
1317            virtio_error(vdev, "virtio: bogus descriptor or out of resources");
1318            goto out;
1319        }
1320
1321        iov[num_sg].iov_len = len;
1322        addr[num_sg] = pa;
1323
1324        sz -= len;
1325        pa += len;
1326        num_sg++;
1327    }
1328    ok = true;
1329
1330out:
1331    *p_num_sg = num_sg;
1332    return ok;
1333}
1334
1335/* Only used by error code paths before we have a VirtQueueElement (therefore
1336 * virtqueue_unmap_sg() can't be used).  Assumes buffers weren't written to
1337 * yet.
1338 */
1339static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num,
1340                                    struct iovec *iov)
1341{
1342    unsigned int i;
1343
1344    for (i = 0; i < out_num + in_num; i++) {
1345        int is_write = i >= out_num;
1346
1347        cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0);
1348        iov++;
1349    }
1350}
1351
1352static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
1353                                hwaddr *addr, unsigned int num_sg,
1354                                bool is_write)
1355{
1356    unsigned int i;
1357    hwaddr len;
1358
1359    for (i = 0; i < num_sg; i++) {
1360        len = sg[i].iov_len;
1361        sg[i].iov_base = dma_memory_map(vdev->dma_as,
1362                                        addr[i], &len, is_write ?
1363                                        DMA_DIRECTION_FROM_DEVICE :
1364                                        DMA_DIRECTION_TO_DEVICE);
1365        if (!sg[i].iov_base) {
1366            error_report("virtio: error trying to map MMIO memory");
1367            exit(1);
1368        }
1369        if (len != sg[i].iov_len) {
1370            error_report("virtio: unexpected memory split");
1371            exit(1);
1372        }
1373    }
1374}
1375
1376void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
1377{
1378    virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, true);
1379    virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num,
1380                                                                        false);
1381}
1382
1383static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
1384{
1385    VirtQueueElement *elem;
1386    size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
1387    size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
1388    size_t out_addr_end = out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
1389    size_t in_sg_ofs = QEMU_ALIGN_UP(out_addr_end, __alignof__(elem->in_sg[0]));
1390    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
1391    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);
1392
1393    assert(sz >= sizeof(VirtQueueElement));
1394    elem = g_malloc(out_sg_end);
1395    trace_virtqueue_alloc_element(elem, sz, in_num, out_num);
1396    elem->out_num = out_num;
1397    elem->in_num = in_num;
1398    elem->in_addr = (void *)elem + in_addr_ofs;
1399    elem->out_addr = (void *)elem + out_addr_ofs;
1400    elem->in_sg = (void *)elem + in_sg_ofs;
1401    elem->out_sg = (void *)elem + out_sg_ofs;
1402    return elem;
1403}
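
    /*
     * The element and its four variable-length arrays live in one allocation,
     * laid out in the order computed above:
     *
     *     [ VirtQueueElement (sz) | in_addr[] | out_addr[] | in_sg[] | out_sg[] ]
     *
     * so a single g_free() releases everything.  Migration code further down
     * copies this into the fixed-size VirtQueueElementOld layout instead of
     * dumping the structure directly.
     */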
1404
1405static void *virtqueue_split_pop(VirtQueue *vq, size_t sz)
1406{
1407    unsigned int i, head, max;
1408    VRingMemoryRegionCaches *caches;
1409    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1410    MemoryRegionCache *desc_cache;
1411    int64_t len;
1412    VirtIODevice *vdev = vq->vdev;
1413    VirtQueueElement *elem = NULL;
1414    unsigned out_num, in_num, elem_entries;
1415    hwaddr addr[VIRTQUEUE_MAX_SIZE];
1416    struct iovec iov[VIRTQUEUE_MAX_SIZE];
1417    VRingDesc desc;
1418    int rc;
1419
1420    RCU_READ_LOCK_GUARD();
1421    if (virtio_queue_empty_rcu(vq)) {
1422        goto done;
1423    }
1424    /* Needed after virtio_queue_empty(), see comment in
1425     * virtqueue_num_heads(). */
1426    smp_rmb();
1427
 1428    /* When we start there is neither input nor output. */
1429    out_num = in_num = elem_entries = 0;
1430
1431    max = vq->vring.num;
1432
1433    if (vq->inuse >= vq->vring.num) {
1434        virtio_error(vdev, "Virtqueue size exceeded");
1435        goto done;
1436    }
1437
1438    if (!virtqueue_get_head(vq, vq->last_avail_idx++, &head)) {
1439        goto done;
1440    }
1441
1442    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
1443        vring_set_avail_event(vq, vq->last_avail_idx);
1444    }
1445
1446    i = head;
1447
1448    caches = vring_get_region_caches(vq);
1449    if (!caches) {
1450        virtio_error(vdev, "Region caches not initialized");
1451        goto done;
1452    }
1453
1454    if (caches->desc.len < max * sizeof(VRingDesc)) {
1455        virtio_error(vdev, "Cannot map descriptor ring");
1456        goto done;
1457    }
1458
1459    desc_cache = &caches->desc;
1460    vring_split_desc_read(vdev, &desc, desc_cache, i);
1461    if (desc.flags & VRING_DESC_F_INDIRECT) {
1462        if (!desc.len || (desc.len % sizeof(VRingDesc))) {
1463            virtio_error(vdev, "Invalid size for indirect buffer table");
1464            goto done;
1465        }
1466
1467        /* loop over the indirect descriptor table */
1468        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1469                                       desc.addr, desc.len, false);
1470        desc_cache = &indirect_desc_cache;
1471        if (len < desc.len) {
1472            virtio_error(vdev, "Cannot map indirect buffer");
1473            goto done;
1474        }
1475
1476        max = desc.len / sizeof(VRingDesc);
1477        i = 0;
1478        vring_split_desc_read(vdev, &desc, desc_cache, i);
1479    }
1480
1481    /* Collect all the descriptors */
1482    do {
1483        bool map_ok;
1484
1485        if (desc.flags & VRING_DESC_F_WRITE) {
1486            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1487                                        iov + out_num,
1488                                        VIRTQUEUE_MAX_SIZE - out_num, true,
1489                                        desc.addr, desc.len);
1490        } else {
1491            if (in_num) {
1492                virtio_error(vdev, "Incorrect order for descriptors");
1493                goto err_undo_map;
1494            }
1495            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1496                                        VIRTQUEUE_MAX_SIZE, false,
1497                                        desc.addr, desc.len);
1498        }
1499        if (!map_ok) {
1500            goto err_undo_map;
1501        }
1502
1503        /* If we've got too many, that implies a descriptor loop. */
1504        if (++elem_entries > max) {
1505            virtio_error(vdev, "Looped descriptor");
1506            goto err_undo_map;
1507        }
1508
1509        rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache, max, &i);
1510    } while (rc == VIRTQUEUE_READ_DESC_MORE);
1511
1512    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
1513        goto err_undo_map;
1514    }
1515
1516    /* Now copy what we have collected and mapped */
1517    elem = virtqueue_alloc_element(sz, out_num, in_num);
1518    elem->index = head;
1519    elem->ndescs = 1;
1520    for (i = 0; i < out_num; i++) {
1521        elem->out_addr[i] = addr[i];
1522        elem->out_sg[i] = iov[i];
1523    }
1524    for (i = 0; i < in_num; i++) {
1525        elem->in_addr[i] = addr[out_num + i];
1526        elem->in_sg[i] = iov[out_num + i];
1527    }
1528
1529    vq->inuse++;
1530
1531    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1532done:
1533    address_space_cache_destroy(&indirect_desc_cache);
1534
1535    return elem;
1536
1537err_undo_map:
1538    virtqueue_undo_map_desc(out_num, in_num, iov);
1539    goto done;
1540}
1541
1542static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz)
1543{
1544    unsigned int i, max;
1545    VRingMemoryRegionCaches *caches;
1546    MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
1547    MemoryRegionCache *desc_cache;
1548    int64_t len;
1549    VirtIODevice *vdev = vq->vdev;
1550    VirtQueueElement *elem = NULL;
1551    unsigned out_num, in_num, elem_entries;
1552    hwaddr addr[VIRTQUEUE_MAX_SIZE];
1553    struct iovec iov[VIRTQUEUE_MAX_SIZE];
1554    VRingPackedDesc desc;
1555    uint16_t id;
1556    int rc;
1557
1558    RCU_READ_LOCK_GUARD();
1559    if (virtio_queue_packed_empty_rcu(vq)) {
1560        goto done;
1561    }
1562
 1563    /* When we start there is neither input nor output. */
1564    out_num = in_num = elem_entries = 0;
1565
1566    max = vq->vring.num;
1567
1568    if (vq->inuse >= vq->vring.num) {
1569        virtio_error(vdev, "Virtqueue size exceeded");
1570        goto done;
1571    }
1572
1573    i = vq->last_avail_idx;
1574
1575    caches = vring_get_region_caches(vq);
1576    if (!caches) {
1577        virtio_error(vdev, "Region caches not initialized");
1578        goto done;
1579    }
1580
1581    if (caches->desc.len < max * sizeof(VRingDesc)) {
1582        virtio_error(vdev, "Cannot map descriptor ring");
1583        goto done;
1584    }
1585
1586    desc_cache = &caches->desc;
1587    vring_packed_desc_read(vdev, &desc, desc_cache, i, true);
1588    id = desc.id;
1589    if (desc.flags & VRING_DESC_F_INDIRECT) {
1590        if (desc.len % sizeof(VRingPackedDesc)) {
1591            virtio_error(vdev, "Invalid size for indirect buffer table");
1592            goto done;
1593        }
1594
1595        /* loop over the indirect descriptor table */
1596        len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
1597                                       desc.addr, desc.len, false);
1598        desc_cache = &indirect_desc_cache;
1599        if (len < desc.len) {
1600            virtio_error(vdev, "Cannot map indirect buffer");
1601            goto done;
1602        }
1603
1604        max = desc.len / sizeof(VRingPackedDesc);
1605        i = 0;
1606        vring_packed_desc_read(vdev, &desc, desc_cache, i, false);
1607    }
1608
1609    /* Collect all the descriptors */
1610    do {
1611        bool map_ok;
1612
1613        if (desc.flags & VRING_DESC_F_WRITE) {
1614            map_ok = virtqueue_map_desc(vdev, &in_num, addr + out_num,
1615                                        iov + out_num,
1616                                        VIRTQUEUE_MAX_SIZE - out_num, true,
1617                                        desc.addr, desc.len);
1618        } else {
1619            if (in_num) {
1620                virtio_error(vdev, "Incorrect order for descriptors");
1621                goto err_undo_map;
1622            }
1623            map_ok = virtqueue_map_desc(vdev, &out_num, addr, iov,
1624                                        VIRTQUEUE_MAX_SIZE, false,
1625                                        desc.addr, desc.len);
1626        }
1627        if (!map_ok) {
1628            goto err_undo_map;
1629        }
1630
1631        /* If we've got too many, that implies a descriptor loop. */
1632        if (++elem_entries > max) {
1633            virtio_error(vdev, "Looped descriptor");
1634            goto err_undo_map;
1635        }
1636
1637        rc = virtqueue_packed_read_next_desc(vq, &desc, desc_cache, max, &i,
1638                                             desc_cache ==
1639                                             &indirect_desc_cache);
1640    } while (rc == VIRTQUEUE_READ_DESC_MORE);
1641
1642    /* Now copy what we have collected and mapped */
1643    elem = virtqueue_alloc_element(sz, out_num, in_num);
1644    for (i = 0; i < out_num; i++) {
1645        elem->out_addr[i] = addr[i];
1646        elem->out_sg[i] = iov[i];
1647    }
1648    for (i = 0; i < in_num; i++) {
1649        elem->in_addr[i] = addr[out_num + i];
1650        elem->in_sg[i] = iov[out_num + i];
1651    }
1652
1653    elem->index = id;
1654    elem->ndescs = (desc_cache == &indirect_desc_cache) ? 1 : elem_entries;
1655    vq->last_avail_idx += elem->ndescs;
1656    vq->inuse += elem->ndescs;
1657
1658    if (vq->last_avail_idx >= vq->vring.num) {
1659        vq->last_avail_idx -= vq->vring.num;
1660        vq->last_avail_wrap_counter ^= 1;
1661    }
1662
1663    vq->shadow_avail_idx = vq->last_avail_idx;
1664    vq->shadow_avail_wrap_counter = vq->last_avail_wrap_counter;
1665
1666    trace_virtqueue_pop(vq, elem, elem->in_num, elem->out_num);
1667done:
1668    address_space_cache_destroy(&indirect_desc_cache);
1669
1670    return elem;
1671
1672err_undo_map:
1673    virtqueue_undo_map_desc(out_num, in_num, iov);
1674    goto done;
1675}
1676
1677void *virtqueue_pop(VirtQueue *vq, size_t sz)
1678{
1679    if (virtio_device_disabled(vq->vdev)) {
1680        return NULL;
1681    }
1682
1683    if (virtio_vdev_has_feature(vq->vdev, VIRTIO_F_RING_PACKED)) {
1684        return virtqueue_packed_pop(vq, sz);
1685    } else {
1686        return virtqueue_split_pop(vq, sz);
1687    }
1688}
1689
1690static unsigned int virtqueue_packed_drop_all(VirtQueue *vq)
1691{
1692    VRingMemoryRegionCaches *caches;
1693    MemoryRegionCache *desc_cache;
1694    unsigned int dropped = 0;
1695    VirtQueueElement elem = {};
1696    VirtIODevice *vdev = vq->vdev;
1697    VRingPackedDesc desc;
1698
1699    caches = vring_get_region_caches(vq);
1700    if (!caches) {
1701        return 0;
1702    }
1703
1704    desc_cache = &caches->desc;
1705
1706    virtio_queue_set_notification(vq, 0);
1707
1708    while (vq->inuse < vq->vring.num) {
1709        unsigned int idx = vq->last_avail_idx;
1710        /*
1711         * Works like virtqueue_pop() but does not map buffers
1712         * and does not allocate any memory.
1713         */
1714        vring_packed_desc_read(vdev, &desc, desc_cache,
1715                               vq->last_avail_idx, true);
1716        if (!is_desc_avail(desc.flags, vq->last_avail_wrap_counter)) {
1717            break;
1718        }
1719        elem.index = desc.id;
1720        elem.ndescs = 1;
1721        while (virtqueue_packed_read_next_desc(vq, &desc, desc_cache,
1722                                               vq->vring.num, &idx, false)) {
1723            ++elem.ndescs;
1724        }
1725        /*
1726         * immediately push the element, nothing to unmap
1727         * as both in_num and out_num are set to 0.
1728         */
1729        virtqueue_push(vq, &elem, 0);
1730        dropped++;
1731        vq->last_avail_idx += elem.ndescs;
1732        if (vq->last_avail_idx >= vq->vring.num) {
1733            vq->last_avail_idx -= vq->vring.num;
1734            vq->last_avail_wrap_counter ^= 1;
1735        }
1736    }
1737
1738    return dropped;
1739}
1740
1741static unsigned int virtqueue_split_drop_all(VirtQueue *vq)
1742{
1743    unsigned int dropped = 0;
1744    VirtQueueElement elem = {};
1745    VirtIODevice *vdev = vq->vdev;
1746    bool fEventIdx = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);
1747
1748    while (!virtio_queue_empty(vq) && vq->inuse < vq->vring.num) {
1749        /* Works like virtqueue_pop() but does not map buffers
1750         * and does not allocate any memory. */
1751        smp_rmb();
1752        if (!virtqueue_get_head(vq, vq->last_avail_idx, &elem.index)) {
1753            break;
1754        }
1755        vq->inuse++;
1756        vq->last_avail_idx++;
1757        if (fEventIdx) {
1758            vring_set_avail_event(vq, vq->last_avail_idx);
1759        }
1760        /* immediately push the element, nothing to unmap
1761         * as both in_num and out_num are set to 0 */
1762        virtqueue_push(vq, &elem, 0);
1763        dropped++;
1764    }
1765
1766    return dropped;
1767}
1768
1769/* virtqueue_drop_all:
1770 * @vq: The #VirtQueue
1771 * Drops all queued buffers and indicates them to the guest
1772 * as if they are done. Useful when buffers can not be
1773 * processed but must be returned to the guest.
1774 */
1775unsigned int virtqueue_drop_all(VirtQueue *vq)
1776{
1777    struct VirtIODevice *vdev = vq->vdev;
1778
1779    if (virtio_device_disabled(vq->vdev)) {
1780        return 0;
1781    }
1782
1783    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1784        return virtqueue_packed_drop_all(vq);
1785    } else {
1786        return virtqueue_split_drop_all(vq);
1787    }
1788}
1789
1790/* Reading and writing a structure directly to QEMUFile is *awful*, but
1791 * it is what QEMU has always done by mistake.  We can change it sooner
1792 * or later by bumping the version number of the affected vm states.
1793 * In the meantime, since the in-memory layout of VirtQueueElement
1794 * has changed, we need to marshal to and from the layout that was
1795 * used before the change.
1796 */
1797typedef struct VirtQueueElementOld {
1798    unsigned int index;
1799    unsigned int out_num;
1800    unsigned int in_num;
1801    hwaddr in_addr[VIRTQUEUE_MAX_SIZE];
1802    hwaddr out_addr[VIRTQUEUE_MAX_SIZE];
1803    struct iovec in_sg[VIRTQUEUE_MAX_SIZE];
1804    struct iovec out_sg[VIRTQUEUE_MAX_SIZE];
1805} VirtQueueElementOld;
1806
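    /*
     * Read a VirtQueueElement back from the migration stream.  On the wire
     * the element uses the old, fixed-size VirtQueueElementOld layout: only
     * indices, guest addresses and iovec lengths are transferred.  The host
     * iov_base pointers are reconstructed afterwards by virtqueue_map(); for
     * packed rings an extra be32 carries the descriptor count.
     */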
1807void *qemu_get_virtqueue_element(VirtIODevice *vdev, QEMUFile *f, size_t sz)
1808{
1809    VirtQueueElement *elem;
1810    VirtQueueElementOld data;
1811    int i;
1812
1813    qemu_get_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1814
1815    /* TODO: teach all callers that this can fail, and return failure instead
1816     * of asserting here.
1817     * This is just one thing (there are probably more) that must be
1818     * fixed before we can allow NDEBUG compilation.
1819     */
1820    assert(ARRAY_SIZE(data.in_addr) >= data.in_num);
1821    assert(ARRAY_SIZE(data.out_addr) >= data.out_num);
1822
1823    elem = virtqueue_alloc_element(sz, data.out_num, data.in_num);
1824    elem->index = data.index;
1825
1826    for (i = 0; i < elem->in_num; i++) {
1827        elem->in_addr[i] = data.in_addr[i];
1828    }
1829
1830    for (i = 0; i < elem->out_num; i++) {
1831        elem->out_addr[i] = data.out_addr[i];
1832    }
1833
1834    for (i = 0; i < elem->in_num; i++) {
1835        /* Base is overwritten by virtqueue_map.  */
1836        elem->in_sg[i].iov_base = 0;
1837        elem->in_sg[i].iov_len = data.in_sg[i].iov_len;
1838    }
1839
1840    for (i = 0; i < elem->out_num; i++) {
1841        /* Base is overwritten by virtqueue_map.  */
1842        elem->out_sg[i].iov_base = 0;
1843        elem->out_sg[i].iov_len = data.out_sg[i].iov_len;
1844    }
1845
1846    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1847        qemu_get_be32s(f, &elem->ndescs);
1848    }
1849
1850    virtqueue_map(vdev, elem);
1851    return elem;
1852}
1853
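    /*
     * Save a VirtQueueElement in the old VirtQueueElementOld wire layout.
     * Only the iovec lengths are written; the iov_base pointers are left
     * zeroed so the QEMU address space layout does not leak into the
     * migration stream.  Packed rings additionally save the descriptor count.
     */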
1854void qemu_put_virtqueue_element(VirtIODevice *vdev, QEMUFile *f,
1855                                VirtQueueElement *elem)
1856{
1857    VirtQueueElementOld data;
1858    int i;
1859
1860    memset(&data, 0, sizeof(data));
1861    data.index = elem->index;
1862    data.in_num = elem->in_num;
1863    data.out_num = elem->out_num;
1864
1865    for (i = 0; i < elem->in_num; i++) {
1866        data.in_addr[i] = elem->in_addr[i];
1867    }
1868
1869    for (i = 0; i < elem->out_num; i++) {
1870        data.out_addr[i] = elem->out_addr[i];
1871    }
1872
1873    for (i = 0; i < elem->in_num; i++) {
1874        /* Base is overwritten by virtqueue_map when loading.  Do not
1875         * save it, as it would leak the QEMU address space layout.  */
1876        data.in_sg[i].iov_len = elem->in_sg[i].iov_len;
1877    }
1878
1879    for (i = 0; i < elem->out_num; i++) {
1880        /* Do not save iov_base as above.  */
1881        data.out_sg[i].iov_len = elem->out_sg[i].iov_len;
1882    }
1883
1884    if (virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
1885        qemu_put_be32s(f, &elem->ndescs);
1886    }
1887
1888    qemu_put_buffer(f, (uint8_t *)&data, sizeof(VirtQueueElementOld));
1889}
1890
1891/* virtio device */
1892static void virtio_notify_vector(VirtIODevice *vdev, uint16_t vector)
1893{
1894    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
1895    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
1896
1897    if (virtio_device_disabled(vdev)) {
1898        return;
1899    }
1900
1901    if (k->notify) {
1902        k->notify(qbus->parent, vector);
1903    }
1904}
1905
1906void virtio_update_irq(VirtIODevice *vdev)
1907{
1908    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
1909}
1910
1911static int virtio_validate_features(VirtIODevice *vdev)
1912{
1913    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1914
1915    if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM) &&
1916        !virtio_vdev_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
1917        return -EFAULT;
1918    }
1919
1920    if (k->validate_features) {
1921        return k->validate_features(vdev);
1922    } else {
1923        return 0;
1924    }
1925}
1926
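    /*
     * Transport entry point for guest writes to the device status field.
     * For VIRTIO 1.0 devices, setting FEATURES_OK runs feature validation
     * first and the write is refused if validation fails.  A change of
     * DRIVER_OK updates the started state before the device class hook runs.
     */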
1927int virtio_set_status(VirtIODevice *vdev, uint8_t val)
1928{
1929    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1930    trace_virtio_set_status(vdev, val);
1931
1932    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1933        if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
1934            val & VIRTIO_CONFIG_S_FEATURES_OK) {
1935            int ret = virtio_validate_features(vdev);
1936
1937            if (ret) {
1938                return ret;
1939            }
1940        }
1941    }
1942
1943    if ((vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) !=
1944        (val & VIRTIO_CONFIG_S_DRIVER_OK)) {
1945        virtio_set_started(vdev, val & VIRTIO_CONFIG_S_DRIVER_OK);
1946    }
1947
1948    if (k->set_status) {
1949        k->set_status(vdev, val);
1950    }
1951    vdev->status = val;
1952
1953    return 0;
1954}
1955
1956static enum virtio_device_endian virtio_default_endian(void)
1957{
1958    if (target_words_bigendian()) {
1959        return VIRTIO_DEVICE_ENDIAN_BIG;
1960    } else {
1961        return VIRTIO_DEVICE_ENDIAN_LITTLE;
1962    }
1963}
1964
1965static enum virtio_device_endian virtio_current_cpu_endian(void)
1966{
1967    if (cpu_virtio_is_big_endian(current_cpu)) {
1968        return VIRTIO_DEVICE_ENDIAN_BIG;
1969    } else {
1970        return VIRTIO_DEVICE_ENDIAN_LITTLE;
1971    }
1972}
1973
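    /*
     * Full device reset.  Re-detects the device endianness (a guest-initiated
     * reset uses the current CPU's view, a system reset falls back to the
     * target default), clears negotiated features and status, and returns
     * every virtqueue to its default, unconfigured state.
     */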
1974void virtio_reset(void *opaque)
1975{
1976    VirtIODevice *vdev = opaque;
1977    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
1978    int i;
1979
1980    virtio_set_status(vdev, 0);
1981    if (current_cpu) {
1982        /* Guest initiated reset */
1983        vdev->device_endian = virtio_current_cpu_endian();
1984    } else {
1985        /* System reset */
1986        vdev->device_endian = virtio_default_endian();
1987    }
1988
1989    if (k->reset) {
1990        k->reset(vdev);
1991    }
1992
1993    vdev->start_on_kick = false;
1994    vdev->started = false;
1995    vdev->broken = false;
1996    vdev->guest_features = 0;
1997    vdev->queue_sel = 0;
1998    vdev->status = 0;
1999    vdev->disabled = false;
2000    qatomic_set(&vdev->isr, 0);
2001    vdev->config_vector = VIRTIO_NO_VECTOR;
2002    virtio_notify_vector(vdev, vdev->config_vector);
2003
2004    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2005        vdev->vq[i].vring.desc = 0;
2006        vdev->vq[i].vring.avail = 0;
2007        vdev->vq[i].vring.used = 0;
2008        vdev->vq[i].last_avail_idx = 0;
2009        vdev->vq[i].shadow_avail_idx = 0;
2010        vdev->vq[i].used_idx = 0;
2011        vdev->vq[i].last_avail_wrap_counter = true;
2012        vdev->vq[i].shadow_avail_wrap_counter = true;
2013        vdev->vq[i].used_wrap_counter = true;
2014        virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
2015        vdev->vq[i].signalled_used = 0;
2016        vdev->vq[i].signalled_used_valid = false;
2017        vdev->vq[i].notification = true;
2018        vdev->vq[i].vring.num = vdev->vq[i].vring.num_default;
2019        vdev->vq[i].inuse = 0;
2020        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
2021    }
2022}
2023
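    /*
     * Legacy config space accessors.  Each read refreshes the config buffer
     * through the device's get_config callback and uses the target's default
     * byte order; out-of-range reads return all-ones and out-of-range writes
     * are silently ignored.
     */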
2024uint32_t virtio_config_readb(VirtIODevice *vdev, uint32_t addr)
2025{
2026    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2027    uint8_t val;
2028
2029    if (addr + sizeof(val) > vdev->config_len) {
2030        return (uint32_t)-1;
2031    }
2032
2033    k->get_config(vdev, vdev->config);
2034
2035    val = ldub_p(vdev->config + addr);
2036    return val;
2037}
2038
2039uint32_t virtio_config_readw(VirtIODevice *vdev, uint32_t addr)
2040{
2041    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2042    uint16_t val;
2043
2044    if (addr + sizeof(val) > vdev->config_len) {
2045        return (uint32_t)-1;
2046    }
2047
2048    k->get_config(vdev, vdev->config);
2049
2050    val = lduw_p(vdev->config + addr);
2051    return val;
2052}
2053
2054uint32_t virtio_config_readl(VirtIODevice *vdev, uint32_t addr)
2055{
2056    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2057    uint32_t val;
2058
2059    if (addr + sizeof(val) > vdev->config_len) {
2060        return (uint32_t)-1;
2061    }
2062
2063    k->get_config(vdev, vdev->config);
2064
2065    val = ldl_p(vdev->config + addr);
2066    return val;
2067}
2068
2069void virtio_config_writeb(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2070{
2071    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2072    uint8_t val = data;
2073
2074    if (addr + sizeof(val) > vdev->config_len) {
2075        return;
2076    }
2077
2078    stb_p(vdev->config + addr, val);
2079
2080    if (k->set_config) {
2081        k->set_config(vdev, vdev->config);
2082    }
2083}
2084
2085void virtio_config_writew(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2086{
2087    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2088    uint16_t val = data;
2089
2090    if (addr + sizeof(val) > vdev->config_len) {
2091        return;
2092    }
2093
2094    stw_p(vdev->config + addr, val);
2095
2096    if (k->set_config) {
2097        k->set_config(vdev, vdev->config);
2098    }
2099}
2100
2101void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
2102{
2103    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2104    uint32_t val = data;
2105
2106    if (addr + sizeof(val) > vdev->config_len) {
2107        return;
2108    }
2109
2110    stl_p(vdev->config + addr, val);
2111
2112    if (k->set_config) {
2113        k->set_config(vdev, vdev->config);
2114    }
2115}
2116
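    /*
     * "Modern" (VIRTIO 1.0 and later) config space accessors: identical to
     * the legacy ones above except that multi-byte fields are always
     * little-endian, as required by the 1.0 spec.
     */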
2117uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
2118{
2119    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2120    uint8_t val;
2121
2122    if (addr + sizeof(val) > vdev->config_len) {
2123        return (uint32_t)-1;
2124    }
2125
2126    k->get_config(vdev, vdev->config);
2127
2128    val = ldub_p(vdev->config + addr);
2129    return val;
2130}
2131
2132uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
2133{
2134    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2135    uint16_t val;
2136
2137    if (addr + sizeof(val) > vdev->config_len) {
2138        return (uint32_t)-1;
2139    }
2140
2141    k->get_config(vdev, vdev->config);
2142
2143    val = lduw_le_p(vdev->config + addr);
2144    return val;
2145}
2146
2147uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
2148{
2149    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2150    uint32_t val;
2151
2152    if (addr + sizeof(val) > vdev->config_len) {
2153        return (uint32_t)-1;
2154    }
2155
2156    k->get_config(vdev, vdev->config);
2157
2158    val = ldl_le_p(vdev->config + addr);
2159    return val;
2160}
2161
2162void virtio_config_modern_writeb(VirtIODevice *vdev,
2163                                 uint32_t addr, uint32_t data)
2164{
2165    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2166    uint8_t val = data;
2167
2168    if (addr + sizeof(val) > vdev->config_len) {
2169        return;
2170    }
2171
2172    stb_p(vdev->config + addr, val);
2173
2174    if (k->set_config) {
2175        k->set_config(vdev, vdev->config);
2176    }
2177}
2178
2179void virtio_config_modern_writew(VirtIODevice *vdev,
2180                                 uint32_t addr, uint32_t data)
2181{
2182    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2183    uint16_t val = data;
2184
2185    if (addr + sizeof(val) > vdev->config_len) {
2186        return;
2187    }
2188
2189    stw_le_p(vdev->config + addr, val);
2190
2191    if (k->set_config) {
2192        k->set_config(vdev, vdev->config);
2193    }
2194}
2195
2196void virtio_config_modern_writel(VirtIODevice *vdev,
2197                                 uint32_t addr, uint32_t data)
2198{
2199    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2200    uint32_t val = data;
2201
2202    if (addr + sizeof(val) > vdev->config_len) {
2203        return;
2204    }
2205
2206    stl_le_p(vdev->config + addr, val);
2207
2208    if (k->set_config) {
2209        k->set_config(vdev, vdev->config);
2210    }
2211}
2212
2213void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
2214{
2215    if (!vdev->vq[n].vring.num) {
2216        return;
2217    }
2218    vdev->vq[n].vring.desc = addr;
2219    virtio_queue_update_rings(vdev, n);
2220}
2221
2222hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
2223{
2224    return vdev->vq[n].vring.desc;
2225}
2226
2227void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
2228                            hwaddr avail, hwaddr used)
2229{
2230    if (!vdev->vq[n].vring.num) {
2231        return;
2232    }
2233    vdev->vq[n].vring.desc = desc;
2234    vdev->vq[n].vring.avail = avail;
2235    vdev->vq[n].vring.used = used;
2236    virtio_init_region_cache(vdev, n);
2237}
2238
2239void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
2240{
2241    /* Don't allow guest to flip queue between existent and
2242     * nonexistent states, or to set it to an invalid size.
2243     */
2244    if (!!num != !!vdev->vq[n].vring.num ||
2245        num > VIRTQUEUE_MAX_SIZE ||
2246        num < 0) {
2247        return;
2248    }
2249    vdev->vq[n].vring.num = num;
2250}
2251
2252VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
2253{
2254    return QLIST_FIRST(&vdev->vector_queues[vector]);
2255}
2256
2257VirtQueue *virtio_vector_next_queue(VirtQueue *vq)
2258{
2259    return QLIST_NEXT(vq, node);
2260}
2261
2262int virtio_queue_get_num(VirtIODevice *vdev, int n)
2263{
2264    return vdev->vq[n].vring.num;
2265}
2266
2267int virtio_queue_get_max_num(VirtIODevice *vdev, int n)
2268{
2269    return vdev->vq[n].vring.num_default;
2270}
2271
2272int virtio_get_num_queues(VirtIODevice *vdev)
2273{
2274    int i;
2275
2276    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2277        if (!virtio_queue_get_num(vdev, i)) {
2278            break;
2279        }
2280    }
2281
2282    return i;
2283}
2284
2285void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
2286{
2287    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2288    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2289
2290    /* virtio-1 compliant devices cannot change the alignment */
2291    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2292        error_report("tried to modify queue alignment for virtio-1 device");
2293        return;
2294    }
2295    /* Check that the transport told us it was going to do this
2296     * (so a buggy transport will immediately assert rather than
2297     * silently failing to migrate this state)
2298     */
2299    assert(k->has_variable_vring_alignment);
2300
2301    if (align) {
2302        vdev->vq[n].vring.align = align;
2303        virtio_queue_update_rings(vdev, n);
2304    }
2305}
2306
2307static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
2308{
2309    bool ret = false;
2310
2311    if (vq->vring.desc && vq->handle_aio_output) {
2312        VirtIODevice *vdev = vq->vdev;
2313
2314        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2315        ret = vq->handle_aio_output(vdev, vq);
2316
2317        if (unlikely(vdev->start_on_kick)) {
2318            virtio_set_started(vdev, true);
2319        }
2320    }
2321
2322    return ret;
2323}
2324
2325static void virtio_queue_notify_vq(VirtQueue *vq)
2326{
2327    if (vq->vring.desc && vq->handle_output) {
2328        VirtIODevice *vdev = vq->vdev;
2329
2330        if (unlikely(vdev->broken)) {
2331            return;
2332        }
2333
2334        trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2335        vq->handle_output(vdev, vq);
2336
2337        if (unlikely(vdev->start_on_kick)) {
2338            virtio_set_started(vdev, true);
2339        }
2340    }
2341}
2342
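    /*
     * Guest kick entry point used by the transports.  If an ioeventfd-style
     * host notifier is enabled the kick is forwarded to it, so the request is
     * handled in whatever context polls that notifier; otherwise the output
     * handler is invoked synchronously here.
     */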
2343void virtio_queue_notify(VirtIODevice *vdev, int n)
2344{
2345    VirtQueue *vq = &vdev->vq[n];
2346
2347    if (unlikely(!vq->vring.desc || vdev->broken)) {
2348        return;
2349    }
2350
2351    trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
2352    if (vq->host_notifier_enabled) {
2353        event_notifier_set(&vq->host_notifier);
2354    } else if (vq->handle_output) {
2355        vq->handle_output(vdev, vq);
2356
2357        if (unlikely(vdev->start_on_kick)) {
2358            virtio_set_started(vdev, true);
2359        }
2360    }
2361}
2362
2363uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
2364{
2365    return n < VIRTIO_QUEUE_MAX ? vdev->vq[n].vector :
2366        VIRTIO_NO_VECTOR;
2367}
2368
2369void virtio_queue_set_vector(VirtIODevice *vdev, int n, uint16_t vector)
2370{
2371    VirtQueue *vq = &vdev->vq[n];
2372
2373    if (n < VIRTIO_QUEUE_MAX) {
2374        if (vdev->vector_queues &&
2375            vdev->vq[n].vector != VIRTIO_NO_VECTOR) {
2376            QLIST_REMOVE(vq, node);
2377        }
2378        vdev->vq[n].vector = vector;
2379        if (vdev->vector_queues &&
2380            vector != VIRTIO_NO_VECTOR) {
2381            QLIST_INSERT_HEAD(&vdev->vector_queues[vector], vq, node);
2382        }
2383    }
2384}
2385
2386VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size,
2387                            VirtIOHandleOutput handle_output)
2388{
2389    int i;
2390
2391    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2392        if (vdev->vq[i].vring.num == 0)
2393            break;
2394    }
2395
2396    if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE)
2397        abort();
2398
2399    vdev->vq[i].vring.num = queue_size;
2400    vdev->vq[i].vring.num_default = queue_size;
2401    vdev->vq[i].vring.align = VIRTIO_PCI_VRING_ALIGN;
2402    vdev->vq[i].handle_output = handle_output;
2403    vdev->vq[i].handle_aio_output = NULL;
2404    vdev->vq[i].used_elems = g_malloc0(sizeof(VirtQueueElement) *
2405                                       queue_size);
2406
2407    return &vdev->vq[i];
2408}
2409
2410void virtio_delete_queue(VirtQueue *vq)
2411{
2412    vq->vring.num = 0;
2413    vq->vring.num_default = 0;
2414    vq->handle_output = NULL;
2415    vq->handle_aio_output = NULL;
2416    g_free(vq->used_elems);
2417    vq->used_elems = NULL;
2418    virtio_virtqueue_reset_region_cache(vq);
2419}
2420
2421void virtio_del_queue(VirtIODevice *vdev, int n)
2422{
2423    if (n < 0 || n >= VIRTIO_QUEUE_MAX) {
2424        abort();
2425    }
2426
2427    virtio_delete_queue(&vdev->vq[n]);
2428}
2429
2430static void virtio_set_isr(VirtIODevice *vdev, int value)
2431{
2432    uint8_t old = qatomic_read(&vdev->isr);
2433
2434    /* Do not write ISR if it does not change, so that its cacheline remains
2435     * shared in the common case where the guest does not read it.
2436     */
2437    if ((old & value) != value) {
2438        qatomic_or(&vdev->isr, value);
2439    }
2440}
2441
2442/* Called within rcu_read_lock(). */
2443static bool virtio_split_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2444{
2445    uint16_t old, new;
2446    bool v;
2447    /* We need to expose used array entries before checking used event. */
2448    smp_mb();
2449    /* Always notify when queue is empty (if the feature was acknowledged) */
2450    if (virtio_vdev_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
2451        !vq->inuse && virtio_queue_empty(vq)) {
2452        return true;
2453    }
2454
2455    if (!virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2456        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
2457    }
2458
2459    v = vq->signalled_used_valid;
2460    vq->signalled_used_valid = true;
2461    old = vq->signalled_used;
2462    new = vq->signalled_used = vq->used_idx;
2463    return !v || vring_need_event(vring_get_used_event(vq), new, old);
2464}
2465
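    /*
     * Packed-ring event suppression.  The 16-bit off_wrap value read from the
     * driver event area packs the event offset in bits 0-14 and the expected
     * wrap counter in bit 15 (e.g. 0x8005 encodes offset 5 with the wrap bit
     * set).  If that wrap counter does not match the device's current one,
     * the offset is adjusted down by the ring size so the standard
     * vring_need_event() comparison still works across the wrap.
     */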
2466static bool vring_packed_need_event(VirtQueue *vq, bool wrap,
2467                                    uint16_t off_wrap, uint16_t new,
2468                                    uint16_t old)
2469{
2470    int off = off_wrap & ~(1 << 15);
2471
2472    if (wrap != off_wrap >> 15) {
2473        off -= vq->vring.num;
2474    }
2475
2476    return vring_need_event(off, new, old);
2477}
2478
2479/* Called within rcu_read_lock(). */
2480static bool virtio_packed_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2481{
2482    VRingPackedDescEvent e;
2483    uint16_t old, new;
2484    bool v;
2485    VRingMemoryRegionCaches *caches;
2486
2487    caches = vring_get_region_caches(vq);
2488    if (!caches) {
2489        return false;
2490    }
2491
2492    vring_packed_event_read(vdev, &caches->avail, &e);
2493
2494    old = vq->signalled_used;
2495    new = vq->signalled_used = vq->used_idx;
2496    v = vq->signalled_used_valid;
2497    vq->signalled_used_valid = true;
2498
2499    if (e.flags == VRING_PACKED_EVENT_FLAG_DISABLE) {
2500        return false;
2501    } else if (e.flags == VRING_PACKED_EVENT_FLAG_ENABLE) {
2502        return true;
2503    }
2504
2505    return !v || vring_packed_need_event(vq, vq->used_wrap_counter,
2506                                         e.off_wrap, new, old);
2507}
2508
2509/* Called within rcu_read_lock().  */
2510static bool virtio_should_notify(VirtIODevice *vdev, VirtQueue *vq)
2511{
2512    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
2513        return virtio_packed_should_notify(vdev, vq);
2514    } else {
2515        return virtio_split_should_notify(vdev, vq);
2516    }
2517}
2518
2519void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq)
2520{
2521    WITH_RCU_READ_LOCK_GUARD() {
2522        if (!virtio_should_notify(vdev, vq)) {
2523            return;
2524        }
2525    }
2526
2527    trace_virtio_notify_irqfd(vdev, vq);
2528
2529    /*
2530     * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but
2531     * Windows drivers included in virtio-win 1.8.0 (circa 2015) are
2532     * incorrectly polling this bit during crashdump and hibernation
2533     * in MSI mode, causing a hang if this bit is never updated.
2534     * Recent releases of Windows do not really shut down, but rather
2535     * log out and hibernate to make the next startup faster.  Hence,
2536     * this manifested as a more serious hang during shutdown.
2537     *
2538     * The next driver release, from 2016, fixed this problem, so working
2539     * around it is not a must, but it's easy to do, so let's do it here.
2540     *
2541     * Note: it's safe to update ISR from any thread as it was switched
2542     * to an atomic operation.
2543     */
2544    virtio_set_isr(vq->vdev, 0x1);
2545    event_notifier_set(&vq->guest_notifier);
2546}
2547
2548static void virtio_irq(VirtQueue *vq)
2549{
2550    virtio_set_isr(vq->vdev, 0x1);
2551    virtio_notify_vector(vq->vdev, vq->vector);
2552}
2553
2554void virtio_notify(VirtIODevice *vdev, VirtQueue *vq)
2555{
2556    WITH_RCU_READ_LOCK_GUARD() {
2557        if (!virtio_should_notify(vdev, vq)) {
2558            return;
2559        }
2560    }
2561
2562    trace_virtio_notify(vdev, vq);
2563    virtio_irq(vq);
2564}
2565
2566void virtio_notify_config(VirtIODevice *vdev)
2567{
2568    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))
2569        return;
2570
2571    virtio_set_isr(vdev, 0x3);
2572    vdev->generation++;
2573    virtio_notify_vector(vdev, vdev->config_vector);
2574}
2575
2576static bool virtio_device_endian_needed(void *opaque)
2577{
2578    VirtIODevice *vdev = opaque;
2579
2580    assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
2581    if (!virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2582        return vdev->device_endian != virtio_default_endian();
2583    }
2584    /* Devices conforming to VIRTIO 1.0 or later are always LE. */
2585    return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
2586}
2587
2588static bool virtio_64bit_features_needed(void *opaque)
2589{
2590    VirtIODevice *vdev = opaque;
2591
2592    return (vdev->host_features >> 32) != 0;
2593}
2594
2595static bool virtio_virtqueue_needed(void *opaque)
2596{
2597    VirtIODevice *vdev = opaque;
2598
2599    return virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1);
2600}
2601
2602static bool virtio_packed_virtqueue_needed(void *opaque)
2603{
2604    VirtIODevice *vdev = opaque;
2605
2606    return virtio_host_has_feature(vdev, VIRTIO_F_RING_PACKED);
2607}
2608
2609static bool virtio_ringsize_needed(void *opaque)
2610{
2611    VirtIODevice *vdev = opaque;
2612    int i;
2613
2614    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2615        if (vdev->vq[i].vring.num != vdev->vq[i].vring.num_default) {
2616            return true;
2617        }
2618    }
2619    return false;
2620}
2621
2622static bool virtio_extra_state_needed(void *opaque)
2623{
2624    VirtIODevice *vdev = opaque;
2625    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2626    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2627
2628    return k->has_extra_state &&
2629        k->has_extra_state(qbus->parent);
2630}
2631
2632static bool virtio_broken_needed(void *opaque)
2633{
2634    VirtIODevice *vdev = opaque;
2635
2636    return vdev->broken;
2637}
2638
2639static bool virtio_started_needed(void *opaque)
2640{
2641    VirtIODevice *vdev = opaque;
2642
2643    return vdev->started;
2644}
2645
2646static bool virtio_disabled_needed(void *opaque)
2647{
2648    VirtIODevice *vdev = opaque;
2649
2650    return vdev->disabled;
2651}
2652
2653static const VMStateDescription vmstate_virtqueue = {
2654    .name = "virtqueue_state",
2655    .version_id = 1,
2656    .minimum_version_id = 1,
2657    .fields = (VMStateField[]) {
2658        VMSTATE_UINT64(vring.avail, struct VirtQueue),
2659        VMSTATE_UINT64(vring.used, struct VirtQueue),
2660        VMSTATE_END_OF_LIST()
2661    }
2662};
2663
2664static const VMStateDescription vmstate_packed_virtqueue = {
2665    .name = "packed_virtqueue_state",
2666    .version_id = 1,
2667    .minimum_version_id = 1,
2668    .fields = (VMStateField[]) {
2669        VMSTATE_UINT16(last_avail_idx, struct VirtQueue),
2670        VMSTATE_BOOL(last_avail_wrap_counter, struct VirtQueue),
2671        VMSTATE_UINT16(used_idx, struct VirtQueue),
2672        VMSTATE_BOOL(used_wrap_counter, struct VirtQueue),
2673        VMSTATE_UINT32(inuse, struct VirtQueue),
2674        VMSTATE_END_OF_LIST()
2675    }
2676};
2677
2678static const VMStateDescription vmstate_virtio_virtqueues = {
2679    .name = "virtio/virtqueues",
2680    .version_id = 1,
2681    .minimum_version_id = 1,
2682    .needed = &virtio_virtqueue_needed,
2683    .fields = (VMStateField[]) {
2684        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2685                      VIRTIO_QUEUE_MAX, 0, vmstate_virtqueue, VirtQueue),
2686        VMSTATE_END_OF_LIST()
2687    }
2688};
2689
2690static const VMStateDescription vmstate_virtio_packed_virtqueues = {
2691    .name = "virtio/packed_virtqueues",
2692    .version_id = 1,
2693    .minimum_version_id = 1,
2694    .needed = &virtio_packed_virtqueue_needed,
2695    .fields = (VMStateField[]) {
2696        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2697                      VIRTIO_QUEUE_MAX, 0, vmstate_packed_virtqueue, VirtQueue),
2698        VMSTATE_END_OF_LIST()
2699    }
2700};
2701
2702static const VMStateDescription vmstate_ringsize = {
2703    .name = "ringsize_state",
2704    .version_id = 1,
2705    .minimum_version_id = 1,
2706    .fields = (VMStateField[]) {
2707        VMSTATE_UINT32(vring.num_default, struct VirtQueue),
2708        VMSTATE_END_OF_LIST()
2709    }
2710};
2711
2712static const VMStateDescription vmstate_virtio_ringsize = {
2713    .name = "virtio/ringsize",
2714    .version_id = 1,
2715    .minimum_version_id = 1,
2716    .needed = &virtio_ringsize_needed,
2717    .fields = (VMStateField[]) {
2718        VMSTATE_STRUCT_VARRAY_POINTER_KNOWN(vq, struct VirtIODevice,
2719                      VIRTIO_QUEUE_MAX, 0, vmstate_ringsize, VirtQueue),
2720        VMSTATE_END_OF_LIST()
2721    }
2722};
2723
2724static int get_extra_state(QEMUFile *f, void *pv, size_t size,
2725                           const VMStateField *field)
2726{
2727    VirtIODevice *vdev = pv;
2728    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2729    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2730
2731    if (!k->load_extra_state) {
2732        return -1;
2733    } else {
2734        return k->load_extra_state(qbus->parent, f);
2735    }
2736}
2737
2738static int put_extra_state(QEMUFile *f, void *pv, size_t size,
2739                           const VMStateField *field, JSONWriter *vmdesc)
2740{
2741    VirtIODevice *vdev = pv;
2742    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2743    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2744
2745    k->save_extra_state(qbus->parent, f);
2746    return 0;
2747}
2748
2749static const VMStateInfo vmstate_info_extra_state = {
2750    .name = "virtqueue_extra_state",
2751    .get = get_extra_state,
2752    .put = put_extra_state,
2753};
2754
2755static const VMStateDescription vmstate_virtio_extra_state = {
2756    .name = "virtio/extra_state",
2757    .version_id = 1,
2758    .minimum_version_id = 1,
2759    .needed = &virtio_extra_state_needed,
2760    .fields = (VMStateField[]) {
2761        {
2762            .name         = "extra_state",
2763            .version_id   = 0,
2764            .field_exists = NULL,
2765            .size         = 0,
2766            .info         = &vmstate_info_extra_state,
2767            .flags        = VMS_SINGLE,
2768            .offset       = 0,
2769        },
2770        VMSTATE_END_OF_LIST()
2771    }
2772};
2773
2774static const VMStateDescription vmstate_virtio_device_endian = {
2775    .name = "virtio/device_endian",
2776    .version_id = 1,
2777    .minimum_version_id = 1,
2778    .needed = &virtio_device_endian_needed,
2779    .fields = (VMStateField[]) {
2780        VMSTATE_UINT8(device_endian, VirtIODevice),
2781        VMSTATE_END_OF_LIST()
2782    }
2783};
2784
2785static const VMStateDescription vmstate_virtio_64bit_features = {
2786    .name = "virtio/64bit_features",
2787    .version_id = 1,
2788    .minimum_version_id = 1,
2789    .needed = &virtio_64bit_features_needed,
2790    .fields = (VMStateField[]) {
2791        VMSTATE_UINT64(guest_features, VirtIODevice),
2792        VMSTATE_END_OF_LIST()
2793    }
2794};
2795
2796static const VMStateDescription vmstate_virtio_broken = {
2797    .name = "virtio/broken",
2798    .version_id = 1,
2799    .minimum_version_id = 1,
2800    .needed = &virtio_broken_needed,
2801    .fields = (VMStateField[]) {
2802        VMSTATE_BOOL(broken, VirtIODevice),
2803        VMSTATE_END_OF_LIST()
2804    }
2805};
2806
2807static const VMStateDescription vmstate_virtio_started = {
2808    .name = "virtio/started",
2809    .version_id = 1,
2810    .minimum_version_id = 1,
2811    .needed = &virtio_started_needed,
2812    .fields = (VMStateField[]) {
2813        VMSTATE_BOOL(started, VirtIODevice),
2814        VMSTATE_END_OF_LIST()
2815    }
2816};
2817
2818static const VMStateDescription vmstate_virtio_disabled = {
2819    .name = "virtio/disabled",
2820    .version_id = 1,
2821    .minimum_version_id = 1,
2822    .needed = &virtio_disabled_needed,
2823    .fields = (VMStateField[]) {
2824        VMSTATE_BOOL(disabled, VirtIODevice),
2825        VMSTATE_END_OF_LIST()
2826    }
2827};
2828
2829static const VMStateDescription vmstate_virtio = {
2830    .name = "virtio",
2831    .version_id = 1,
2832    .minimum_version_id = 1,
2833    .minimum_version_id_old = 1,
2834    .fields = (VMStateField[]) {
2835        VMSTATE_END_OF_LIST()
2836    },
2837    .subsections = (const VMStateDescription*[]) {
2838        &vmstate_virtio_device_endian,
2839        &vmstate_virtio_64bit_features,
2840        &vmstate_virtio_virtqueues,
2841        &vmstate_virtio_ringsize,
2842        &vmstate_virtio_broken,
2843        &vmstate_virtio_extra_state,
2844        &vmstate_virtio_started,
2845        &vmstate_virtio_packed_virtqueues,
2846        &vmstate_virtio_disabled,
2847        NULL
2848    }
2849};
2850
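    /*
     * Save the common virtio device state.  The fixed part (status, ISR,
     * queue selector, low 32 feature bits, config space, and per-queue
     * descriptor address and last_avail_idx) keeps the historical wire
     * format; everything newer (64-bit features, VIRTIO 1.0 ring addresses,
     * packed ring state, ...) is emitted as vmstate_virtio subsections at
     * the end.
     */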
2851int virtio_save(VirtIODevice *vdev, QEMUFile *f)
2852{
2853    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2854    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2855    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
2856    uint32_t guest_features_lo = (vdev->guest_features & 0xffffffff);
2857    int i;
2858
2859    if (k->save_config) {
2860        k->save_config(qbus->parent, f);
2861    }
2862
2863    qemu_put_8s(f, &vdev->status);
2864    qemu_put_8s(f, &vdev->isr);
2865    qemu_put_be16s(f, &vdev->queue_sel);
2866    qemu_put_be32s(f, &guest_features_lo);
2867    qemu_put_be32(f, vdev->config_len);
2868    qemu_put_buffer(f, vdev->config, vdev->config_len);
2869
2870    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2871        if (vdev->vq[i].vring.num == 0)
2872            break;
2873    }
2874
2875    qemu_put_be32(f, i);
2876
2877    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2878        if (vdev->vq[i].vring.num == 0)
2879            break;
2880
2881        qemu_put_be32(f, vdev->vq[i].vring.num);
2882        if (k->has_variable_vring_alignment) {
2883            qemu_put_be32(f, vdev->vq[i].vring.align);
2884        }
2885        /*
2886         * Save desc now, the rest of the ring addresses are saved in
2887         * subsections for VIRTIO-1 devices.
2888         */
2889        qemu_put_be64(f, vdev->vq[i].vring.desc);
2890        qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
2891        if (k->save_queue) {
2892            k->save_queue(qbus->parent, i, f);
2893        }
2894    }
2895
2896    if (vdc->save != NULL) {
2897        vdc->save(vdev, f);
2898    }
2899
2900    if (vdc->vmsd) {
2901        int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL);
2902        if (ret) {
2903            return ret;
2904        }
2905    }
2906
2907    /* Subsections */
2908    return vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
2909}
2910
2911/* A wrapper for use as a VMState .put function */
2912static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
2913                              const VMStateField *field, JSONWriter *vmdesc)
2914{
2915    return virtio_save(VIRTIO_DEVICE(opaque), f);
2916}
2917
2918/* A wrapper for use as a VMState .get function */
2919static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
2920                             const VMStateField *field)
2921{
2922    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
2923    DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
2924
2925    return virtio_load(vdev, f, dc->vmsd->version_id);
2926}
2927
2928const VMStateInfo  virtio_vmstate_info = {
2929    .name = "virtio",
2930    .get = virtio_device_get,
2931    .put = virtio_device_put,
2932};
2933
2934static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
2935{
2936    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
2937    bool bad = (val & ~(vdev->host_features)) != 0;
2938
2939    val &= vdev->host_features;
2940    if (k->set_features) {
2941        k->set_features(vdev, val);
2942    }
2943    vdev->guest_features = val;
2944    return bad ? -1 : 0;
2945}
2946
2947int virtio_set_features(VirtIODevice *vdev, uint64_t val)
2948{
2949    int ret;
2950    /*
2951     * The driver must not attempt to set features after feature negotiation
2952     * has finished.
2953     */
2954    if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
2955        return -EINVAL;
2956    }
2957    ret = virtio_set_features_nocheck(vdev, val);
2958    if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
2959        /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches.  */
2960        int i;
2961        for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2962            if (vdev->vq[i].vring.num != 0) {
2963                virtio_init_region_cache(vdev, i);
2964            }
2965        }
2966    }
2967    if (!ret) {
2968        if (!virtio_device_started(vdev, vdev->status) &&
2969            !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
2970            vdev->start_on_kick = true;
2971        }
2972    }
2973    return ret;
2974}
2975
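    /*
     * Compute the config space size implied by the enabled host features:
     * the result is the largest "end" offset among the feature_sizes[]
     * entries whose feature bits are present in host_features.
     */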
2976size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
2977                                      uint64_t host_features)
2978{
2979    size_t config_size = 0;
2980    int i;
2981
2982    for (i = 0; feature_sizes[i].flags != 0; i++) {
2983        if (host_features & feature_sizes[i].flags) {
2984            config_size = MAX(feature_sizes[i].end, config_size);
2985        }
2986    }
2987
2988    return config_size;
2989}
2990
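    /*
     * Counterpart of virtio_save().  Besides restoring the fixed-format
     * fields and the subsections, this sanity-checks the incoming state
     * (queue selector, number of queues, avail/used index consistency) and
     * rebuilds the ring addresses and region caches for every configured
     * queue.
     */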
2991int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
2992{
2993    int i, ret;
2994    int32_t config_len;
2995    uint32_t num;
2996    uint32_t features;
2997    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
2998    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
2999    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
3000
3001    /*
3002     * We poison the endianness to ensure it does not get used before
3003     * subsections have been loaded.
3004     */
3005    vdev->device_endian = VIRTIO_DEVICE_ENDIAN_UNKNOWN;
3006
3007    if (k->load_config) {
3008        ret = k->load_config(qbus->parent, f);
3009        if (ret)
3010            return ret;
3011    }
3012
3013    qemu_get_8s(f, &vdev->status);
3014    qemu_get_8s(f, &vdev->isr);
3015    qemu_get_be16s(f, &vdev->queue_sel);
3016    if (vdev->queue_sel >= VIRTIO_QUEUE_MAX) {
3017        return -1;
3018    }
3019    qemu_get_be32s(f, &features);
3020
3021    /*
3022     * Temporarily set guest_features low bits - needed by
3023     * virtio net load code testing for VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
3024     * VIRTIO_NET_F_GUEST_ANNOUNCE and VIRTIO_NET_F_CTRL_VQ.
3025     *
3026     * Note: devices should always test host features in future - don't create
3027     * new dependencies like this.
3028     */
3029    vdev->guest_features = features;
3030
3031    config_len = qemu_get_be32(f);
3032
3033    /*
3034     * There are cases where the incoming config can be bigger or smaller
3035     * than what we have; so load what we have space for, and skip
3036     * any excess that's in the stream.
3037     */
3038    qemu_get_buffer(f, vdev->config, MIN(config_len, vdev->config_len));
3039
3040    while (config_len > vdev->config_len) {
3041        qemu_get_byte(f);
3042        config_len--;
3043    }
3044
3045    num = qemu_get_be32(f);
3046
3047    if (num > VIRTIO_QUEUE_MAX) {
3048        error_report("Invalid number of virtqueues: 0x%x", num);
3049        return -1;
3050    }
3051
3052    for (i = 0; i < num; i++) {
3053        vdev->vq[i].vring.num = qemu_get_be32(f);
3054        if (k->has_variable_vring_alignment) {
3055            vdev->vq[i].vring.align = qemu_get_be32(f);
3056        }
3057        vdev->vq[i].vring.desc = qemu_get_be64(f);
3058        qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
3059        vdev->vq[i].signalled_used_valid = false;
3060        vdev->vq[i].notification = true;
3061
3062        if (!vdev->vq[i].vring.desc && vdev->vq[i].last_avail_idx) {
3063            error_report("VQ %d address 0x0 "
3064                         "inconsistent with Host index 0x%x",
3065                         i, vdev->vq[i].last_avail_idx);
3066            return -1;
3067        }
3068        if (k->load_queue) {
3069            ret = k->load_queue(qbus->parent, i, f);
3070            if (ret)
3071                return ret;
3072        }
3073    }
3074
3075    virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
3076
3077    if (vdc->load != NULL) {
3078        ret = vdc->load(vdev, f, version_id);
3079        if (ret) {
3080            return ret;
3081        }
3082    }
3083
3084    if (vdc->vmsd) {
3085        ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id);
3086        if (ret) {
3087            return ret;
3088        }
3089    }
3090
3091    /* Subsections */
3092    ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1);
3093    if (ret) {
3094        return ret;
3095    }
3096
3097    if (vdev->device_endian == VIRTIO_DEVICE_ENDIAN_UNKNOWN) {
3098        vdev->device_endian = virtio_default_endian();
3099    }
3100
3101    if (virtio_64bit_features_needed(vdev)) {
3102        /*
3103         * Subsection load filled vdev->guest_features.  Run them
3104         * through virtio_set_features to sanity-check them against
3105         * host_features.
3106         */
3107        uint64_t features64 = vdev->guest_features;
3108        if (virtio_set_features_nocheck(vdev, features64) < 0) {
3109            error_report("Features 0x%" PRIx64 " unsupported. "
3110                         "Allowed features: 0x%" PRIx64,
3111                         features64, vdev->host_features);
3112            return -1;
3113        }
3114    } else {
3115        if (virtio_set_features_nocheck(vdev, features) < 0) {
3116            error_report("Features 0x%x unsupported. "
3117                         "Allowed features: 0x%" PRIx64,
3118                         features, vdev->host_features);
3119            return -1;
3120        }
3121    }
3122
3123    if (!virtio_device_started(vdev, vdev->status) &&
3124        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3125        vdev->start_on_kick = true;
3126    }
3127
3128    RCU_READ_LOCK_GUARD();
3129    for (i = 0; i < num; i++) {
3130        if (vdev->vq[i].vring.desc) {
3131            uint16_t nheads;
3132
3133            /*
3134             * VIRTIO-1 devices migrate desc, used, and avail ring addresses so
3135             * only the region cache needs to be set up.  Legacy devices need
3136             * to calculate used and avail ring addresses based on the desc
3137             * address.
3138             */
3139            if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3140                virtio_init_region_cache(vdev, i);
3141            } else {
3142                virtio_queue_update_rings(vdev, i);
3143            }
3144
3145            if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3146                vdev->vq[i].shadow_avail_idx = vdev->vq[i].last_avail_idx;
3147                vdev->vq[i].shadow_avail_wrap_counter =
3148                                        vdev->vq[i].last_avail_wrap_counter;
3149                continue;
3150            }
3151
3152            nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
3153            /* Check it isn't doing strange things with descriptor numbers. */
3154            if (nheads > vdev->vq[i].vring.num) {
3155                virtio_error(vdev, "VQ %d size 0x%x Guest index 0x%x "
3156                             "inconsistent with Host index 0x%x: delta 0x%x",
3157                             i, vdev->vq[i].vring.num,
3158                             vring_avail_idx(&vdev->vq[i]),
3159                             vdev->vq[i].last_avail_idx, nheads);
3160                vdev->vq[i].used_idx = 0;
3161                vdev->vq[i].shadow_avail_idx = 0;
3162                vdev->vq[i].inuse = 0;
3163                continue;
3164            }
3165            vdev->vq[i].used_idx = vring_used_idx(&vdev->vq[i]);
3166            vdev->vq[i].shadow_avail_idx = vring_avail_idx(&vdev->vq[i]);
3167
3168            /*
3169             * Some devices migrate VirtQueueElements that have been popped
3170             * from the avail ring but not yet returned to the used ring.
3171             * Since max ring size < UINT16_MAX it's safe to use modulo
3172             * UINT16_MAX + 1 subtraction.
3173             */
3174            vdev->vq[i].inuse = (uint16_t)(vdev->vq[i].last_avail_idx -
3175                                vdev->vq[i].used_idx);
3176            if (vdev->vq[i].inuse > vdev->vq[i].vring.num) {
3177                error_report("VQ %d size 0x%x < last_avail_idx 0x%x - "
3178                             "used_idx 0x%x",
3179                             i, vdev->vq[i].vring.num,
3180                             vdev->vq[i].last_avail_idx,
3181                             vdev->vq[i].used_idx);
3182                return -1;
3183            }
3184        }
3185    }
3186
3187    if (vdc->post_load) {
3188        ret = vdc->post_load(vdev);
3189        if (ret) {
3190            return ret;
3191        }
3192    }
3193
3194    return 0;
3195}
3196
3197void virtio_cleanup(VirtIODevice *vdev)
3198{
3199    qemu_del_vm_change_state_handler(vdev->vmstate);
3200}
3201
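    /*
     * VM run state change handler.  It re-applies the device status around
     * the transport's vmstate_change hook: before the hook when the backend
     * is (re)starting (VM running with DRIVER_OK set), after the hook when
     * the backend is stopping.
     */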
3202static void virtio_vmstate_change(void *opaque, bool running, RunState state)
3203{
3204    VirtIODevice *vdev = opaque;
3205    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3206    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3207    bool backend_run = running && virtio_device_started(vdev, vdev->status);
3208    vdev->vm_running = running;
3209
3210    if (backend_run) {
3211        virtio_set_status(vdev, vdev->status);
3212    }
3213
3214    if (k->vmstate_change) {
3215        k->vmstate_change(qbus->parent, backend_run);
3216    }
3217
3218    if (!backend_run) {
3219        virtio_set_status(vdev, vdev->status);
3220    }
3221}
3222
3223void virtio_instance_init_common(Object *proxy_obj, void *data,
3224                                 size_t vdev_size, const char *vdev_name)
3225{
3226    DeviceState *vdev = data;
3227
3228    object_initialize_child_with_props(proxy_obj, "virtio-backend", vdev,
3229                                       vdev_size, vdev_name, &error_abort,
3230                                       NULL);
3231    qdev_alias_all_properties(vdev, proxy_obj);
3232}
3233
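    /*
     * Common initialisation for all virtio devices: allocate the per-vector
     * queue lists (if the transport reports MSI vectors), the fixed array of
     * VIRTIO_QUEUE_MAX virtqueues and the config space buffer, and register
     * the VM state change handler that starts and stops the backend.
     */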
3234void virtio_init(VirtIODevice *vdev, const char *name,
3235                 uint16_t device_id, size_t config_size)
3236{
3237    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3238    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3239    int i;
3240    int nvectors = k->query_nvectors ? k->query_nvectors(qbus->parent) : 0;
3241
3242    if (nvectors) {
3243        vdev->vector_queues =
3244            g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
3245    }
3246
3247    vdev->start_on_kick = false;
3248    vdev->started = false;
3249    vdev->device_id = device_id;
3250    vdev->status = 0;
3251    qatomic_set(&vdev->isr, 0);
3252    vdev->queue_sel = 0;
3253    vdev->config_vector = VIRTIO_NO_VECTOR;
3254    vdev->vq = g_malloc0(sizeof(VirtQueue) * VIRTIO_QUEUE_MAX);
3255    vdev->vm_running = runstate_is_running();
3256    vdev->broken = false;
3257    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3258        vdev->vq[i].vector = VIRTIO_NO_VECTOR;
3259        vdev->vq[i].vdev = vdev;
3260        vdev->vq[i].queue_index = i;
3261        vdev->vq[i].host_notifier_enabled = false;
3262    }
3263
3264    vdev->name = name;
3265    vdev->config_len = config_size;
3266    if (vdev->config_len) {
3267        vdev->config = g_malloc0(config_size);
3268    } else {
3269        vdev->config = NULL;
3270    }
3271    vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev),
3272            virtio_vmstate_change, vdev);
3273    vdev->device_endian = virtio_default_endian();
3274    vdev->use_guest_notifier_mask = true;
3275}
3276
3277/*
3278 * Only devices that have already been around prior to defining the virtio
3279 * standard support legacy mode; this includes devices not specified in the
3280 * standard. All newer devices conform to the virtio standard only.
3281 */
3282bool virtio_legacy_allowed(VirtIODevice *vdev)
3283{
3284    switch (vdev->device_id) {
3285    case VIRTIO_ID_NET:
3286    case VIRTIO_ID_BLOCK:
3287    case VIRTIO_ID_CONSOLE:
3288    case VIRTIO_ID_RNG:
3289    case VIRTIO_ID_BALLOON:
3290    case VIRTIO_ID_RPMSG:
3291    case VIRTIO_ID_SCSI:
3292    case VIRTIO_ID_9P:
3293    case VIRTIO_ID_RPROC_SERIAL:
3294    case VIRTIO_ID_CAIF:
3295        return true;
3296    default:
3297        return false;
3298    }
3299}
3300
3301bool virtio_legacy_check_disabled(VirtIODevice *vdev)
3302{
3303    return vdev->disable_legacy_check;
3304}
3305
3306hwaddr virtio_queue_get_desc_addr(VirtIODevice *vdev, int n)
3307{
3308    return vdev->vq[n].vring.desc;
3309}
3310
3311bool virtio_queue_enabled_legacy(VirtIODevice *vdev, int n)
3312{
3313    return virtio_queue_get_desc_addr(vdev, n) != 0;
3314}
3315
3316bool virtio_queue_enabled(VirtIODevice *vdev, int n)
3317{
3318    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3319    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3320
3321    if (k->queue_enabled) {
3322        return k->queue_enabled(qbus->parent, n);
3323    }
3324    return virtio_queue_enabled_legacy(vdev, n);
3325}
3326
3327hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
3328{
3329    return vdev->vq[n].vring.avail;
3330}
3331
3332hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
3333{
3334    return vdev->vq[n].vring.used;
3335}
3336
3337hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
3338{
3339    return sizeof(VRingDesc) * vdev->vq[n].vring.num;
3340}
3341
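    /*
     * Ring size helpers.  For split rings the avail/used sizes are the ring
     * header plus one entry per descriptor, plus a trailing 16-bit
     * used_event/avail_event word when VIRTIO_RING_F_EVENT_IDX was
     * negotiated.  Packed rings only expose the driver/device event
     * suppression structure here.
     */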
3342hwaddr virtio_queue_get_avail_size(VirtIODevice *vdev, int n)
3343{
3344    int s;
3345
3346    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3347        return sizeof(struct VRingPackedDescEvent);
3348    }
3349
3350    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3351    return offsetof(VRingAvail, ring) +
3352        sizeof(uint16_t) * vdev->vq[n].vring.num + s;
3353}
3354
3355hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
3356{
3357    int s;
3358
3359    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3360        return sizeof(struct VRingPackedDescEvent);
3361    }
3362
3363    s = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
3364    return offsetof(VRingUsed, ring) +
3365        sizeof(VRingUsedElem) * vdev->vq[n].vring.num + s;
3366}
3367
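    /*
     * For packed rings the "last avail index" exchanged with the transport is
     * a 32-bit encoding: bits 0-14 hold last_avail_idx with its wrap counter
     * in bit 15, and bits 16-30 hold used_idx with its wrap counter in bit
     * 31.  The setter below decodes the same layout.
     */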
3368static unsigned int virtio_queue_packed_get_last_avail_idx(VirtIODevice *vdev,
3369                                                           int n)
3370{
3371    unsigned int avail, used;
3372
3373    avail = vdev->vq[n].last_avail_idx;
3374    avail |= ((uint16_t)vdev->vq[n].last_avail_wrap_counter) << 15;
3375
3376    used = vdev->vq[n].used_idx;
3377    used |= ((uint16_t)vdev->vq[n].used_wrap_counter) << 15;
3378
3379    return avail | used << 16;
3380}
3381
3382static uint16_t virtio_queue_split_get_last_avail_idx(VirtIODevice *vdev,
3383                                                      int n)
3384{
3385    return vdev->vq[n].last_avail_idx;
3386}
3387
3388unsigned int virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
3389{
3390    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3391        return virtio_queue_packed_get_last_avail_idx(vdev, n);
3392    } else {
3393        return virtio_queue_split_get_last_avail_idx(vdev, n);
3394    }
3395}
3396
3397static void virtio_queue_packed_set_last_avail_idx(VirtIODevice *vdev,
3398                                                   int n, unsigned int idx)
3399{
3400    struct VirtQueue *vq = &vdev->vq[n];
3401
3402    vq->last_avail_idx = vq->shadow_avail_idx = idx & 0x7fff;
3403    vq->last_avail_wrap_counter =
3404        vq->shadow_avail_wrap_counter = !!(idx & 0x8000);
3405    idx >>= 16;
3406    vq->used_idx = idx & 0x7fff;
3407    vq->used_wrap_counter = !!(idx & 0x8000);
3408}
3409
3410static void virtio_queue_split_set_last_avail_idx(VirtIODevice *vdev,
3411                                                  int n, unsigned int idx)
3412{
3413    vdev->vq[n].last_avail_idx = idx;
3414    vdev->vq[n].shadow_avail_idx = idx;
3415}
3416
3417void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n,
3418                                     unsigned int idx)
3419{
3420    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3421        virtio_queue_packed_set_last_avail_idx(vdev, n, idx);
3422    } else {
3423        virtio_queue_split_set_last_avail_idx(vdev, n, idx);
3424    }
3425}
3426
3427static void virtio_queue_packed_restore_last_avail_idx(VirtIODevice *vdev,
3428                                                       int n)
3429{
3430    /* We don't have a reference like avail idx in shared memory */
3431    return;
3432}
3433
3434static void virtio_queue_split_restore_last_avail_idx(VirtIODevice *vdev,
3435                                                      int n)
3436{
3437    RCU_READ_LOCK_GUARD();
3438    if (vdev->vq[n].vring.desc) {
3439        vdev->vq[n].last_avail_idx = vring_used_idx(&vdev->vq[n]);
3440        vdev->vq[n].shadow_avail_idx = vdev->vq[n].last_avail_idx;
3441    }
3442}
3443
3444void virtio_queue_restore_last_avail_idx(VirtIODevice *vdev, int n)
3445{
3446    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3447        virtio_queue_packed_restore_last_avail_idx(vdev, n);
3448    } else {
3449        virtio_queue_split_restore_last_avail_idx(vdev, n);
3450    }
3451}
3452
3453static void virtio_queue_packed_update_used_idx(VirtIODevice *vdev, int n)
3454{
3455    /* used idx was updated through set_last_avail_idx() */
3456    return;
3457}
3458
3459static void virtio_split_packed_update_used_idx(VirtIODevice *vdev, int n)
3460{
3461    RCU_READ_LOCK_GUARD();
3462    if (vdev->vq[n].vring.desc) {
3463        vdev->vq[n].used_idx = vring_used_idx(&vdev->vq[n]);
3464    }
3465}
3466
3467void virtio_queue_update_used_idx(VirtIODevice *vdev, int n)
3468{
3469    if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
3470        return virtio_queue_packed_update_used_idx(vdev, n);
3471    } else {
3472        return virtio_split_packed_update_used_idx(vdev, n);
3473    }
3474}
3475
3476void virtio_queue_invalidate_signalled_used(VirtIODevice *vdev, int n)
3477{
3478    vdev->vq[n].signalled_used_valid = false;
3479}
3480
3481VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
3482{
3483    return vdev->vq + n;
3484}
3485
3486uint16_t virtio_get_queue_index(VirtQueue *vq)
3487{
3488    return vq->queue_index;
3489}
3490
3491static void virtio_queue_guest_notifier_read(EventNotifier *n)
3492{
3493    VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
3494    if (event_notifier_test_and_clear(n)) {
3495        virtio_irq(vq);
3496    }
3497}
3498
3499void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
3500                                                bool with_irqfd)
3501{
3502    if (assign && !with_irqfd) {
3503        event_notifier_set_handler(&vq->guest_notifier,
3504                                   virtio_queue_guest_notifier_read);
3505    } else {
3506        event_notifier_set_handler(&vq->guest_notifier, NULL);
3507    }
3508    if (!assign) {
3509        /* Test and clear notifier before closing it,
3510         * in case poll callback didn't have time to run. */
3511        virtio_queue_guest_notifier_read(&vq->guest_notifier);
3512    }
3513}
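
/*
 * Sketch of how a transport might drive the helper above when it cannot use
 * an irqfd and has to fall back to a userspace handler on the guest
 * notifier (names and ordering are illustrative, not lifted from a
 * particular transport):
 *
 *   EventNotifier *gn = virtio_queue_get_guest_notifier(vq);
 *
 *   event_notifier_init(gn, 0);
 *   virtio_queue_set_guest_notifier_fd_handler(vq, true, false);
 *   ...
 *   virtio_queue_set_guest_notifier_fd_handler(vq, false, false);
 *   event_notifier_cleanup(gn);
 */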
3514
3515EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
3516{
3517    return &vq->guest_notifier;
3518}
3519
3520static void virtio_queue_host_notifier_aio_read(EventNotifier *n)
3521{
3522    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3523    if (event_notifier_test_and_clear(n)) {
3524        virtio_queue_notify_aio_vq(vq);
3525    }
3526}
3527
3528static void virtio_queue_host_notifier_aio_poll_begin(EventNotifier *n)
3529{
3530    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3531
3532    virtio_queue_set_notification(vq, 0);
3533}
3534
3535static bool virtio_queue_host_notifier_aio_poll(void *opaque)
3536{
3537    EventNotifier *n = opaque;
3538    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3539
3540    if (!vq->vring.desc || virtio_queue_empty(vq)) {
3541        return false;
3542    }
3543
3544    return virtio_queue_notify_aio_vq(vq);
3545}
3546
3547static void virtio_queue_host_notifier_aio_poll_end(EventNotifier *n)
3548{
3549    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3550
3551    /* Caller polls once more after this to catch requests that race with us */
3552    virtio_queue_set_notification(vq, 1);
3553}
3554
3555void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx,
3556                                                VirtIOHandleAIOOutput handle_output)
3557{
3558    if (handle_output) {
3559        vq->handle_aio_output = handle_output;
3560        aio_set_event_notifier(ctx, &vq->host_notifier, true,
3561                               virtio_queue_host_notifier_aio_read,
3562                               virtio_queue_host_notifier_aio_poll);
3563        aio_set_event_notifier_poll(ctx, &vq->host_notifier,
3564                                    virtio_queue_host_notifier_aio_poll_begin,
3565                                    virtio_queue_host_notifier_aio_poll_end);
3566    } else {
3567        aio_set_event_notifier(ctx, &vq->host_notifier, true, NULL, NULL);
3568        /* Test and clear notifier after disabling the event,
3569         * in case the poll callback didn't have time to run. */
3570        virtio_queue_host_notifier_aio_read(&vq->host_notifier);
3571        vq->handle_aio_output = NULL;
3572    }
3573}
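
/*
 * Rough usage sketch for the function above, modelled on how a dataplane
 * device moves queue processing into an IOThread's AioContext
 * (my_handle_output is a placeholder callback, not a function in this file):
 *
 *   aio_context_acquire(ctx);
 *   virtio_queue_aio_set_host_notifier_handler(vq, ctx, my_handle_output);
 *   ...
 *   virtio_queue_aio_set_host_notifier_handler(vq, ctx, NULL);
 *   aio_context_release(ctx);
 */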
3574
3575void virtio_queue_host_notifier_read(EventNotifier *n)
3576{
3577    VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
3578    if (event_notifier_test_and_clear(n)) {
3579        virtio_queue_notify_vq(vq);
3580    }
3581}
3582
3583EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
3584{
3585    return &vq->host_notifier;
3586}
3587
3588void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled)
3589{
3590    vq->host_notifier_enabled = enabled;
3591}
3592
3593int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
3594                                      MemoryRegion *mr, bool assign)
3595{
3596    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3597    VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
3598
3599    if (k->set_host_notifier_mr) {
3600        return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
3601    }
3602
3603    return -1;
3604}
3605
3606void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
3607{
3608    g_free(vdev->bus_name);
3609    vdev->bus_name = g_strdup(bus_name);
3610}
3611
3612void GCC_FMT_ATTR(2, 3) virtio_error(VirtIODevice *vdev, const char *fmt, ...)
3613{
3614    va_list ap;
3615
3616    va_start(ap, fmt);
3617    error_vreport(fmt, ap);
3618    va_end(ap);
3619
3620    if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) {
3621        vdev->status |= VIRTIO_CONFIG_S_NEEDS_RESET;
3622        virtio_notify_config(vdev);
3623    }
3624
3625    vdev->broken = true;
3626}
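
/*
 * A typical call site in a device model looks roughly like the following
 * (illustrative only; the message and field names are placeholders):
 *
 *   if (elem->out_num < 1) {
 *       virtio_error(vdev, "%s: request without out descriptors", __func__);
 *       virtqueue_detach_element(vq, elem, 0);
 *       g_free(elem);
 *       return;
 *   }
 */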
3627
3628static void virtio_memory_listener_commit(MemoryListener *listener)
3629{
3630    VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
3631    int i;
3632
3633    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3634        if (vdev->vq[i].vring.num == 0) {
3635            break;
3636        }
3637        virtio_init_region_cache(vdev, i);
3638    }
3639}
3640
3641static void virtio_device_realize(DeviceState *dev, Error **errp)
3642{
3643    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3644    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3645    Error *err = NULL;
3646
3647    /* Devices should either use vmsd or the load/save methods */
3648    assert(!vdc->vmsd || !vdc->load);
3649
3650    if (vdc->realize != NULL) {
3651        vdc->realize(dev, &err);
3652        if (err != NULL) {
3653            error_propagate(errp, err);
3654            return;
3655        }
3656    }
3657
3658    virtio_bus_device_plugged(vdev, &err);
3659    if (err != NULL) {
3660        error_propagate(errp, err);
3661        vdc->unrealize(dev);
3662        return;
3663    }
3664
3665    vdev->listener.commit = virtio_memory_listener_commit;
3666    memory_listener_register(&vdev->listener, vdev->dma_as);
3667}
3668
3669static void virtio_device_unrealize(DeviceState *dev)
3670{
3671    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3672    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3673
3674    memory_listener_unregister(&vdev->listener);
3675    virtio_bus_device_unplugged(vdev);
3676
3677    if (vdc->unrealize != NULL) {
3678        vdc->unrealize(dev);
3679    }
3680
3681    g_free(vdev->bus_name);
3682    vdev->bus_name = NULL;
3683}
3684
3685static void virtio_device_free_virtqueues(VirtIODevice *vdev)
3686{
3687    int i;
3688    if (!vdev->vq) {
3689        return;
3690    }
3691
3692    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
3693        if (vdev->vq[i].vring.num == 0) {
3694            break;
3695        }
3696        virtio_virtqueue_reset_region_cache(&vdev->vq[i]);
3697    }
3698    g_free(vdev->vq);
3699}
3700
3701static void virtio_device_instance_finalize(Object *obj)
3702{
3703    VirtIODevice *vdev = VIRTIO_DEVICE(obj);
3704
3705    virtio_device_free_virtqueues(vdev);
3706
3707    g_free(vdev->config);
3708    g_free(vdev->vector_queues);
3709}
3710
3711static Property virtio_properties[] = {
3712    DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
3713    DEFINE_PROP_BOOL("use-started", VirtIODevice, use_started, true),
3714    DEFINE_PROP_BOOL("use-disabled-flag", VirtIODevice, use_disabled_flag, true),
3715    DEFINE_PROP_BOOL("x-disable-legacy-check", VirtIODevice,
3716                     disable_legacy_check, false),
3717    DEFINE_PROP_END_OF_LIST(),
3718};
3719
3720static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
3721{
3722    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3723    int i, n, r, err;
3724
3725    /*
3726     * Batch all the host notifiers in a single transaction to avoid
3727     * quadratic time complexity in address_space_update_ioeventfds().
3728     */
3729    memory_region_transaction_begin();
3730    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3731        VirtQueue *vq = &vdev->vq[n];
3732        if (!virtio_queue_get_num(vdev, n)) {
3733            continue;
3734        }
3735        r = virtio_bus_set_host_notifier(qbus, n, true);
3736        if (r < 0) {
3737            err = r;
3738            goto assign_error;
3739        }
3740        event_notifier_set_handler(&vq->host_notifier,
3741                                   virtio_queue_host_notifier_read);
3742    }
3743
3744    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3745        /* Kick right away to begin processing requests already in vring */
3746        VirtQueue *vq = &vdev->vq[n];
3747        if (!vq->vring.num) {
3748            continue;
3749        }
3750        event_notifier_set(&vq->host_notifier);
3751    }
3752    memory_region_transaction_commit();
3753    return 0;
3754
3755assign_error:
3756    i = n; /* save n for a second iteration after transaction is committed. */
3757    while (--n >= 0) {
3758        VirtQueue *vq = &vdev->vq[n];
3759        if (!virtio_queue_get_num(vdev, n)) {
3760            continue;
3761        }
3762
3763        event_notifier_set_handler(&vq->host_notifier, NULL);
3764        r = virtio_bus_set_host_notifier(qbus, n, false);
3765        assert(r >= 0);
3766    }
3767    /*
3768     * The transaction expects the ioeventfds to be open when it
3769     * commits. Do it now, before the cleanup loop.
3770     */
3771    memory_region_transaction_commit();
3772
3773    while (--i >= 0) {
3774        if (!virtio_queue_get_num(vdev, i)) {
3775            continue;
3776        }
3777        virtio_bus_cleanup_host_notifier(qbus, i);
3778    }
3779    return err;
3780}
3781
3782int virtio_device_start_ioeventfd(VirtIODevice *vdev)
3783{
3784    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3785    VirtioBusState *vbus = VIRTIO_BUS(qbus);
3786
3787    return virtio_bus_start_ioeventfd(vbus);
3788}
3789
3790static void virtio_device_stop_ioeventfd_impl(VirtIODevice *vdev)
3791{
3792    VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
3793    int n, r;
3794
3795    /*
3796     * Batch all the host notifiers in a single transaction to avoid
3797     * quadratic time complexity in address_space_update_ioeventfds().
3798     */
3799    memory_region_transaction_begin();
3800    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3801        VirtQueue *vq = &vdev->vq[n];
3802
3803        if (!virtio_queue_get_num(vdev, n)) {
3804            continue;
3805        }
3806        event_notifier_set_handler(&vq->host_notifier, NULL);
3807        r = virtio_bus_set_host_notifier(qbus, n, false);
3808        assert(r >= 0);
3809    }
3810    /*
3811     * The transaction expects the ioeventfds to be open when it
3812     * commits. Do it now, before the cleanup loop.
3813     */
3814    memory_region_transaction_commit();
3815
3816    for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
3817        if (!virtio_queue_get_num(vdev, n)) {
3818            continue;
3819        }
3820        virtio_bus_cleanup_host_notifier(qbus, n);
3821    }
3822}
3823
3824int virtio_device_grab_ioeventfd(VirtIODevice *vdev)
3825{
3826    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3827    VirtioBusState *vbus = VIRTIO_BUS(qbus);
3828
3829    return virtio_bus_grab_ioeventfd(vbus);
3830}
3831
3832void virtio_device_release_ioeventfd(VirtIODevice *vdev)
3833{
3834    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3835    VirtioBusState *vbus = VIRTIO_BUS(qbus);
3836
3837    virtio_bus_release_ioeventfd(vbus);
3838}
3839
3840static void virtio_device_class_init(ObjectClass *klass, void *data)
3841{
3842    /* Set the base class defaults here. */
3843    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3844    DeviceClass *dc = DEVICE_CLASS(klass);
3845
3846    dc->realize = virtio_device_realize;
3847    dc->unrealize = virtio_device_unrealize;
3848    dc->bus_type = TYPE_VIRTIO_BUS;
3849    device_class_set_props(dc, virtio_properties);
3850    vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
3851    vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
3852
3853    vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
3854}
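
/*
 * Concrete devices derive from this class and fill in their own hooks; a
 * minimal subclass sketch (placeholder names, not an existing device) would
 * look like:
 *
 *   static void my_virtio_class_init(ObjectClass *klass, void *data)
 *   {
 *       VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
 *
 *       vdc->realize = my_virtio_realize;
 *       vdc->unrealize = my_virtio_unrealize;
 *       vdc->get_features = my_virtio_get_features;
 *       vdc->get_config = my_virtio_get_config;
 *   }
 */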
3855
3856bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
3857{
3858    BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
3859    VirtioBusState *vbus = VIRTIO_BUS(qbus);
3860
3861    return virtio_bus_ioeventfd_enabled(vbus);
3862}
3863
3864static const TypeInfo virtio_device_info = {
3865    .name = TYPE_VIRTIO_DEVICE,
3866    .parent = TYPE_DEVICE,
3867    .instance_size = sizeof(VirtIODevice),
3868    .class_init = virtio_device_class_init,
3869    .instance_finalize = virtio_device_instance_finalize,
3870    .abstract = true,
3871    .class_size = sizeof(VirtioDeviceClass),
3872};
3873
3874static void virtio_register_types(void)
3875{
3876    type_register_static(&virtio_device_info);
3877}
3878
3879type_init(virtio_register_types)
3880