/*
 * libvhost-user.h - Vhost User library
 *
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

  13
  14#ifndef LIBVHOST_USER_H
  15#define LIBVHOST_USER_H
  16
  17#include <stdint.h>
  18#include <stdbool.h>
  19#include <stddef.h>
  20#include <poll.h>
  21#include <linux/vhost.h>
  22#include <pthread.h>
  23#include "standard-headers/linux/virtio_ring.h"
  24
  25/* Based on qemu/hw/virtio/vhost-user.c */
  26#define VHOST_USER_F_PROTOCOL_FEATURES 30
  27#define VHOST_LOG_PAGE 4096
  28
  29#define VIRTQUEUE_MAX_SIZE 1024
  30
  31#define VHOST_MEMORY_BASELINE_NREGIONS 8
  32
  33/*
  34 * Set a reasonable maximum number of ram slots, which will be supported by
  35 * any architecture.
  36 */
  37#define VHOST_USER_MAX_RAM_SLOTS 32
  38
  39#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)
  40
/* Origin of a VHOST_USER_SET_CONFIG request. */
typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_MASTER = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

/*
 * Optional protocol-level features, negotiated as bit numbers via
 * VHOST_USER_GET/SET_PROTOCOL_FEATURES.
 */
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    /*
     * NOTE(review): bit 13 is left unassigned here; presumably reserved for
     * a feature this library does not implement — confirm against the
     * vhost-user protocol specification.
     */
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,

    VHOST_USER_PROTOCOL_F_MAX
};

/* Mask with one bit set for every protocol feature defined above. */
#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

/* Master-to-slave request codes carried in VhostUserMsg.request. */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE  = 28,
    VHOST_USER_POSTCOPY_LISTEN  = 29,
    VHOST_USER_POSTCOPY_END     = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    /*
     * NOTE(review): value 34 is intentionally skipped; presumably a request
     * this library does not implement — confirm against the vhost-user
     * protocol specification.
     */
    VHOST_USER_VRING_KICK = 35,
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

/* Slave-to-master request codes, sent over the slave channel (slave_fd). */
typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_VRING_CALL = 4,
    VHOST_USER_SLAVE_VRING_ERR = 5,
    VHOST_USER_SLAVE_MAX
}  VhostUserSlaveRequest;

/* One guest memory region as described by the master. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* guest physical start address */
    uint64_t memory_size;       /* region size in bytes */
    uint64_t userspace_addr;    /* master (QEMU) virtual address */
    uint64_t mmap_offset;       /* offset into the accompanying mmap'able fd */
} VhostUserMemoryRegion;

/* Payload of VHOST_USER_SET_MEM_TABLE: a table of memory regions. */
typedef struct VhostUserMemory {
    uint32_t nregions;          /* number of valid entries in regions[] */
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

/* Payload of VHOST_USER_ADD_MEM_REG / VHOST_USER_REM_MEM_REG. */
typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

/* Payload of VHOST_USER_SET_LOG_BASE: where to mmap the dirty log. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

/* Payload of VHOST_USER_GET/SET_CONFIG: a slice of device config space. */
typedef struct VhostUserConfig {
    uint32_t offset;    /* offset within the device config space */
    uint32_t size;      /* number of valid bytes in region[] */
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

/* Dummy instance, present only so sizeof its fields can size the header. */
static VhostUserConfig c __attribute__ ((unused));
/* Size of the VhostUserConfig fields that precede the region[] data. */
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

/* Payload of VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG. */
typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

/* Payload of VHOST_USER_GET/SET_INFLIGHT_FD: shared inflight-tracking area. */
typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

/*
 * On 32/64-bit x86 Windows, gcc_struct forces the GCC (SysV-compatible)
 * struct layout so the packed wire format matches other platforms.
 */
#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif

/*
 * A vhost-user message. The leading header (request, flags, size) and the
 * payload form the on-wire format (see VHOST_USER_HDR_SIZE); the trailing
 * fds/fd_num/data fields are local bookkeeping — file descriptors arrive as
 * the socket message's auxiliary data, not in the byte stream.
 */
typedef struct VhostUserMsg {
    int request;    /* a VhostUserRequest (or VhostUserSlaveRequest) code */

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */

    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg memreg;
        VhostUserLog log;
        VhostUserConfig config;
        VhostUserVringArea area;
        VhostUserInflight inflight;
    } payload;

    /* File descriptors received/sent alongside this message. */
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    int fd_num;
    uint8_t *data;
} VU_PACKED VhostUserMsg;

/* A guest memory region after the slave has mmap'ed it locally. */
typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VuDevRegion;

typedef struct VuDev VuDev;

/* Device-level callback types referenced by VuDevIface. */
typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
/* Return non-zero to indicate the message was fully handled by the callback. */
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
                                  int *do_reply);
/* Read one message from @sock into @vmsg; return true on success. */
typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags);

/* Device callbacks supplied by the library user via vu_init(). */
typedef struct VuDevIface {
    /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
    vu_get_features_cb get_features;
    /* enable vhost implementation features */
    vu_set_features_cb set_features;
    /*
     * get the protocol feature bitmask from the underlying vhost
     * implementation
     */
    vu_get_features_cb get_protocol_features;
    /* enable protocol features in the underlying vhost implementation. */
    vu_set_features_cb set_protocol_features;
    /*
     * process_msg is called for each vhost-user message received;
     * libvhost-user processing is skipped if it returns non-zero.
     */
    vu_process_msg_cb process_msg;
    /* tells when queues can be processed */
    vu_queue_set_started_cb queue_set_started;
    /*
     * If the queue is processed in order, in which case it will be
     * resumed to vring.used->idx. This can help to support resuming
     * on unmanaged exit/crash.
     */
    vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
    /* get the config space of the device */
    vu_get_config_cb get_config;
    /* set the config space of the device */
    vu_set_config_cb set_config;
} VuDevIface;

/* Per-queue handler, invoked to process a queue (see vu_set_queue_handler). */
typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);

/* Mapped addresses and state of one split-virtqueue ring. */
typedef struct VuRing {
    unsigned int num;           /* ring size (number of descriptors) */
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;    /* guest address used for dirty logging */
    uint32_t flags;
} VuRing;

/* Per-descriptor inflight state, one entry per virtqueue descriptor. */
typedef struct VuDescStateSplit {
    /*
     * Indicate whether this descriptor is inflight or not.
     * Only available for head-descriptor.
     */
    uint8_t inflight;

    /* Padding */
    uint8_t padding[5];

    /*
     * Maintain a list for the last batch of used descriptors.
     * Only available when batching is used for submitting
     */
    uint16_t next;

    /*
     * Used to preserve the order of fetching available descriptors.
     * Only available for head-descriptor.
     */
    uint64_t counter;
} VuDescStateSplit;

/* Header of the shared inflight-tracking region for one virtqueue. */
typedef struct VuVirtqInflight {
    /* The feature flags of this region. Now it's initialized to 0. */
    uint64_t features;

    /*
     * The version of this region. It's 1 currently.
     * Zero value indicates a vm reset happened.
     */
    uint16_t version;

    /*
     * The size of VuDescStateSplit array. It's equal to the virtqueue
     * size. Slave could get it from queue size field of VhostUserInflight.
     */
    uint16_t desc_num;

    /* The head of list that track the last batch of used descriptors. */
    uint16_t last_batch_head;

    /* Storing the idx value of used ring */
    uint16_t used_idx;

    /* Used to track the state of each descriptor in descriptor table */
    VuDescStateSplit desc[];
} VuVirtqInflight;

/* A descriptor to resubmit after recovery, ordered by its fetch counter. */
typedef struct VuVirtqInflightDesc {
    uint16_t index;
    uint64_t counter;
} VuVirtqInflightDesc;

/* Runtime state of one virtqueue. */
typedef struct VuVirtq {
    VuRing vring;

    /* Shared inflight-tracking region (see VhostUserInflight). */
    VuVirtqInflight *inflight;

    /* Descriptors to resubmit after an unmanaged exit/crash. */
    VuVirtqInflightDesc *resubmit_list;

    uint16_t resubmit_num;

    uint64_t counter;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    /* Number of in-use elements (popped but not yet pushed back). */
    int inuse;

    vu_queue_handler_cb handler;

    int call_fd;    /* fd used to notify the master (see vu_queue_notify) */
    int kick_fd;    /* fd the master signals when new buffers are available */
    int err_fd;
    unsigned int enable;
    bool started;

    /* Guest addresses of our ring */
    struct vhost_vring_addr vra;
} VuVirtq;

/*
 * Poll conditions passed to the watch callbacks.
 * NOTE(review): the enum tag "Condtion" is a historical typo, kept as-is
 * for source compatibility.
 */
enum VuWatchCondtion {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
};

/* Called on unrecoverable errors; @err describes the failure. */
typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
/* Called when a watched fd meets @condition (VuWatchCondtion bits). */
typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
                                 vu_watch_cb cb, void *data);
typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);

/* mmap'ed inflight-tracking buffer shared with the master. */
typedef struct VuDevInflightInfo {
    int fd;
    void *addr;
    uint64_t size;
} VuDevInflightInfo;

/* A vhost-user slave device context. */
struct VuDev {
    int sock;                   /* main vhost-user socket */
    uint32_t nregions;          /* number of valid entries in regions[] */
    VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
    VuVirtq *vq;                /* virtqueue array; length is max_queues */
    VuDevInflightInfo inflight_info;
    int log_call_fd;
    /* Must be held while using slave_fd */
    pthread_mutex_t slave_mutex;
    int slave_fd;
    uint64_t log_size;
    uint8_t *log_table;         /* mmap'ed dirty-log bitmap */
    uint64_t features;
    uint64_t protocol_features;
    bool broken;                /* set after a panic; device is unusable */
    uint16_t max_queues;

    /*
     * @read_msg: custom method to read vhost-user message
     *
     * Read data from vhost_user socket fd and fill up
     * the passed VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close the received set of file
     * descriptors as socket message's auxiliary data.
     *
     * For the details, please refer to vu_message_read in libvhost-user.c
     * which will be used by default if no custom method is provided when
     * calling vu_init
     *
     * Returns: true if vhost-user message successfully received,
     *          otherwise return false.
     *
     */
    vu_read_msg_cb read_msg;

    /*
     * @set_watch: add or update the given fd to the watch set,
     * call cb when condition is met.
     */
    vu_set_watch_cb set_watch;

    /* @remove_watch: remove the given fd from the watch set */
    vu_remove_watch_cb remove_watch;

    /*
     * @panic: encountered an unrecoverable error, you may try to re-initialize
     */
    vu_panic_cb panic;
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd;
    bool postcopy_listening;
};

/*
 * A popped descriptor chain: device-writable (in_sg) and device-readable
 * (out_sg) scatter/gather vectors. Returned by vu_queue_pop(), which
 * allocates it; the caller must free() it.
 */
typedef struct VuVirtqElement {
    unsigned int index;         /* head descriptor index */
    unsigned int out_num;
    unsigned int in_num;
    struct iovec *in_sg;
    struct iovec *out_sg;
} VuVirtqElement;

/**
 * vu_init:
 * @dev: a VuDev context
 * @max_queues: maximum number of virtqueues
 * @socket: the socket connected to vhost-user master
 * @panic: a panic callback
 * @read_msg: custom message-read callback, or NULL for the default
 *            (vu_message_read in libvhost-user.c)
 * @set_watch: a set_watch callback
 * @remove_watch: a remove_watch callback
 * @iface: a VuDevIface structure with vhost-user device callbacks
 *
 * Initializes a VuDev vhost-user context.
 *
 * Returns: true on success, false on failure.
 **/
bool vu_init(VuDev *dev,
             uint16_t max_queues,
             int socket,
             vu_panic_cb panic,
             vu_read_msg_cb read_msg,
             vu_set_watch_cb set_watch,
             vu_remove_watch_cb remove_watch,
             const VuDevIface *iface);

/**
 * vu_deinit:
 * @dev: a VuDev context
 *
 * Cleans up the VuDev context
 */
void vu_deinit(VuDev *dev);

/**
 * vu_dispatch:
 * @dev: a VuDev context
 *
 * Process one vhost-user message.
 *
 * Returns: TRUE on success, FALSE on failure.
 */
bool vu_dispatch(VuDev *dev);

/**
 * vu_gpa_to_va:
 * @dev: a VuDev context
 * @plen: guest memory size (in/out — presumably clamped to the length that
 *        is contiguously mapped; confirm against libvhost-user.c)
 * @guest_addr: guest address
 *
 * Translate a guest address to a pointer. Returns NULL on failure.
 */
void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);

/**
 * vu_get_queue:
 * @dev: a VuDev context
 * @qidx: queue index
 *
 * Returns: the queue at index @qidx.
 */
VuVirtq *vu_get_queue(VuDev *dev, int qidx);

/**
 * vu_set_queue_handler:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @handler: the queue handler callback
 *
 * Set the queue handler. This function may be called several times
 * for the same queue. If called with NULL @handler, the handler is
 * removed.
 */
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler);

/**
 * vu_set_queue_host_notifier:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @fd: a file descriptor
 * @size: host page size
 * @offset: notifier offset in @fd file
 *
 * Set queue's host notifier. This function may be called several
 * times for the same queue. If called with -1 @fd, the notifier
 * is removed.
 *
 * Returns: true on success — NOTE(review): confirm the failure semantics
 * in libvhost-user.c.
 */
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset);

/**
 * vu_queue_set_notification:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @enable: state
 *
 * Set whether the queue notifies (via event index or interrupt)
 */
void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);

/**
 * vu_queue_enabled:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is enabled.
 */
bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_started:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is started.
 */
bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);

/**
 * vu_queue_empty:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: true if the queue is empty or not ready.
 */
bool vu_queue_empty(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 */
void vu_queue_notify(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify_sync:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 * or sync message if possible.
 */
void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_pop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @sz: the size of struct to return (must be >= VuVirtqElement)
 *
 * Returns: a VuVirtqElement filled from the queue or NULL. The
 * returned element must be free()-d by the caller.
 */
void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);

/**
 * vu_queue_unpop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: The #VuVirtqElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue.  The next
 * call to vu_queue_pop() will refetch the element.
 */
void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                    size_t len);

/**
 * vu_queue_rewind:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue.  The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_fill:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 * @idx: optional offset for the used ring index (0 in general)
 *
 * Fill the used ring with @elem element.
 */
void vu_queue_fill(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem,
                   unsigned int len, unsigned int idx);

/**
 * vu_queue_push:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 *
 * Helper that combines vu_queue_fill() with a vu_queue_flush().
 */
void vu_queue_push(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem, unsigned int len);

/**
 * vu_queue_flush:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to flush
 *
 * Mark the last number of elements as done (used.idx is updated by
 * num elements).
 */
void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_get_avail_bytes:
 * @vdev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: in bytes
 * @out_bytes: out bytes
 * @max_in_bytes: stop counting after max_in_bytes
 * @max_out_bytes: stop counting after max_out_bytes
 *
 * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
 */
void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
                              unsigned int *out_bytes,
                              unsigned max_in_bytes, unsigned max_out_bytes);

/**
 * vu_queue_avail_bytes:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: expected in bytes
 * @out_bytes: expected out bytes
 *
 * Returns: true if in_bytes <= in_total && out_bytes <= out_total
 */
bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                          unsigned int out_bytes);

#endif /* LIBVHOST_USER_H */