dpdk/lib/vhost/vhost.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_malloc.h>

#include "rte_vhost.h"
#include "rte_vdpa.h"
#include "rte_vdpa_dev.h"

#include "rte_vhost_async.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING ((uint32_t)1 << 0)
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY ((uint32_t)1 << 1)
/* Used to indicate that the built-in vhost net device backend is enabled */
#define VIRTIO_DEV_BUILTIN_VIRTIO_NET ((uint32_t)1 << 2)
/* Used to indicate that the device has its own data path and is configured */
#define VIRTIO_DEV_VDPA_CONFIGURED ((uint32_t)1 << 3)
/* Used to indicate that the feature negotiation failed */
#define VIRTIO_DEV_FEATURES_FAILED ((uint32_t)1 << 4)
/* Used to indicate that the virtio_net tx code should fill TX ol_flags */
#define VIRTIO_DEV_LEGACY_OL_FLAGS ((uint32_t)1 << 5)

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256

#define VHOST_LOG_CACHE_NR 32

#define MAX_PKT_BURST 32

#define VHOST_MAX_ASYNC_IT (MAX_PKT_BURST * 2)
#define VHOST_MAX_ASYNC_VEC (BUF_VECTOR_MAX * 4)

#define PACKED_DESC_ENQUEUE_USED_FLAG(w)        \
        ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED | VRING_DESC_F_WRITE) : \
                VRING_DESC_F_WRITE)
#define PACKED_DESC_DEQUEUE_USED_FLAG(w)        \
        ((w) ? (VRING_DESC_F_AVAIL | VRING_DESC_F_USED) : 0x0)
#define PACKED_DESC_SINGLE_DEQUEUE_FLAG (VRING_DESC_F_NEXT | \
                                         VRING_DESC_F_INDIRECT)

#define PACKED_BATCH_SIZE (RTE_CACHE_LINE_SIZE / \
                            sizeof(struct vring_packed_desc))
#define PACKED_BATCH_MASK (PACKED_BATCH_SIZE - 1)
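/*
 * For example, on a target with a 64-byte RTE_CACHE_LINE_SIZE the 16-byte
 * struct vring_packed_desc gives PACKED_BATCH_SIZE == 4 and
 * PACKED_BATCH_MASK == 3, i.e. one batch covers the descriptors that fit
 * in a single cache line.
 */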

#ifdef VHOST_GCC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("GCC unroll 4") \
        for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_CLANG_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll 4") \
        for (iter = val; iter < size; iter++)
#endif

#ifdef VHOST_ICC_UNROLL_PRAGMA
#define vhost_for_each_try_unroll(iter, val, size) _Pragma("unroll (4)") \
        for (iter = val; iter < size; iter++)
#endif

#ifndef vhost_for_each_try_unroll
#define vhost_for_each_try_unroll(iter, val, num) \
        for (iter = val; iter < num; iter++)
#endif
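/*
 * Usage sketch (hypothetical caller, for illustration only; process_desc()
 * is a made-up helper):
 *
 *        uint16_t i;
 *
 *        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
 *                process_desc(i);
 *
 * The compiler-specific pragmas above only hint at 4x unrolling; the
 * fallback definition expands to a plain loop.
 */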

/**
 * Structure containing the buffer address, length and descriptor index
 * from the vring, used for scatter RX.
 */
struct buf_vector {
        uint64_t buf_iova;
        uint64_t buf_addr;
        uint32_t buf_len;
        uint32_t desc_idx;
};

/*
 * Structure containing the info for each batched memory copy.
 */
struct batch_copy_elem {
        void *dst;
        void *src;
        uint32_t len;
        uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
        uint32_t offset;
        unsigned long val;
};

struct vring_used_elem_packed {
        uint16_t id;
        uint16_t flags;
        uint32_t len;
        uint32_t count;
};

/**
 * Structure containing variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
        union {
                struct vring_desc       *desc;
                struct vring_packed_desc   *desc_packed;
        };
        union {
                struct vring_avail      *avail;
                struct vring_packed_desc_event *driver_event;
        };
        union {
                struct vring_used       *used;
                struct vring_packed_desc_event *device_event;
        };
        uint16_t                size;

        uint16_t                last_avail_idx;
        uint16_t                last_used_idx;
        /* Last used index we have notified to the frontend. */
        uint16_t                signalled_used;
        bool                    signalled_used_valid;
#define VIRTIO_INVALID_EVENTFD          (-1)
#define VIRTIO_UNINITIALIZED_EVENTFD    (-2)

        bool                    enabled;
        bool                    access_ok;
        bool                    ready;

        rte_spinlock_t          access_lock;


        union {
                struct vring_used_elem  *shadow_used_split;
                struct vring_used_elem_packed *shadow_used_packed;
        };
        uint16_t                shadow_used_idx;
        /* Record the latest cache-aligned descriptor index for packed ring enqueue */
        uint16_t                shadow_aligned_idx;
        /* Record the first dequeued descriptor index for the packed ring */
        uint16_t                shadow_last_used_idx;

        uint16_t                batch_copy_nb_elems;
        struct batch_copy_elem  *batch_copy_elems;
        bool                    used_wrap_counter;
        bool                    avail_wrap_counter;

        /* Physical address of used ring, for logging */
        uint16_t                log_cache_nb_elem;
        uint64_t                log_guest_addr;
        struct log_cache_entry  *log_cache;

        rte_rwlock_t    iotlb_lock;
        rte_rwlock_t    iotlb_pending_lock;
        struct rte_mempool *iotlb_pool;
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
        TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
        int                             iotlb_cache_nr;

        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
        int                     kickfd;

        /* inflight shared memory info */
        union {
                struct rte_vhost_inflight_info_split *inflight_split;
                struct rte_vhost_inflight_info_packed *inflight_packed;
        };
        struct rte_vhost_resubmit_info *resubmit_inflight;
        uint64_t                global_counter;

        /* operation callbacks for async DMA */
        struct rte_vhost_async_channel_ops      async_ops;

        struct rte_vhost_iov_iter *it_pool;
        struct iovec *vec_pool;

        /* async data transfer status */
        struct async_inflight_info *async_pkts_info;
        uint16_t        async_pkts_idx;
        uint16_t        async_pkts_inflight_n;
        uint16_t        async_last_pkts_n;
        union {
                struct vring_used_elem  *async_descs_split;
                struct vring_used_elem_packed *async_buffers_packed;
        };
        union {
                uint16_t async_desc_idx_split;
                uint16_t async_buffer_idx_packed;
        };
        union {
                uint16_t last_async_desc_idx_split;
                uint16_t last_async_buffer_idx_packed;
        };

        /* vq async features */
        bool            async_inorder;
        bool            async_registered;
        uint16_t        async_threshold;

        int                     notif_enable;
#define VIRTIO_UNINITIALIZED_NOTIF      (-1)

        struct vhost_vring_addr ring_addrs;
} __rte_cache_aligned;
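/*
 * Note: the desc/avail/used pointers in the unions above share storage with
 * their packed ring counterparts; which member is valid depends on whether
 * VIRTIO_F_RING_PACKED was negotiated (see vq_is_packed() below).
 */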

/* Virtio device status as per Virtio specification */
#define VIRTIO_DEVICE_STATUS_RESET              0x00
#define VIRTIO_DEVICE_STATUS_ACK                0x01
#define VIRTIO_DEVICE_STATUS_DRIVER             0x02
#define VIRTIO_DEVICE_STATUS_DRIVER_OK          0x04
#define VIRTIO_DEVICE_STATUS_FEATURES_OK        0x08
#define VIRTIO_DEVICE_STATUS_DEV_NEED_RESET     0x40
#define VIRTIO_DEVICE_STATUS_FAILED             0x80

#define VHOST_MAX_VRING                 0x100
#define VHOST_MAX_QUEUE_PAIRS           0x80

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
        __u64 iova;
        __u64 size;
        __u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
        __u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
        __u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
        int type;
        union {
                struct vhost_iotlb_msg iotlb;
                __u8 padding[64];
        };
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

/* Declare packed ring related bits for older kernels */
#ifndef VIRTIO_F_RING_PACKED

#define VIRTIO_F_RING_PACKED 34

struct vring_packed_desc {
        uint64_t addr;
        uint32_t len;
        uint16_t id;
        uint16_t flags;
};

struct vring_packed_desc_event {
        uint16_t off_wrap;
        uint16_t flags;
};
#endif

/*
 * Declare the packed ring defines below unconditionally,
 * as the kernel header might use different names.
 */
#define VRING_DESC_F_AVAIL      (1ULL << 7)
#define VRING_DESC_F_USED       (1ULL << 15)

#define VRING_EVENT_F_ENABLE 0x0
#define VRING_EVENT_F_DISABLE 0x1
#define VRING_EVENT_F_DESC 0x2

/*
 * Available and used descriptors are in the same order
 */
#ifndef VIRTIO_F_IN_ORDER
#define VIRTIO_F_IN_ORDER      35
#endif

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
                                (1ULL << VIRTIO_F_ANY_LAYOUT) | \
                                (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
                                (1ULL << VIRTIO_NET_F_CTRL_RX) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
                                (1ULL << VIRTIO_NET_F_MQ)      | \
                                (1ULL << VIRTIO_F_VERSION_1)   | \
                                (1ULL << VHOST_F_LOG_ALL)      | \
                                (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
                                (1ULL << VIRTIO_NET_F_GSO) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_HOST_UFO) | \
                                (1ULL << VIRTIO_NET_F_HOST_ECN) | \
                                (1ULL << VIRTIO_NET_F_CSUM)    | \
                                (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
                                (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
                                (1ULL << VIRTIO_NET_F_GUEST_UFO) | \
                                (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
                                (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
                                (1ULL << VIRTIO_RING_F_EVENT_IDX) | \
                                (1ULL << VIRTIO_NET_F_MTU)  | \
                                (1ULL << VIRTIO_F_IN_ORDER) | \
                                (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
                                (1ULL << VIRTIO_F_RING_PACKED))
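/*
 * Individual feature bits negotiated by a device are tested the same way
 * elsewhere in this header, for example:
 *
 *        dev->features & (1ULL << VIRTIO_F_RING_PACKED)
 *
 * as done by vq_is_packed() below.
 */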


struct guest_page {
        uint64_t guest_phys_addr;
        uint64_t host_phys_addr;
        uint64_t size;
};

struct inflight_mem_info {
        int             fd;
        void            *addr;
        uint64_t        size;
};

/**
 * Device structure containing all configuration information relating
 * to the device.
 */
struct virtio_net {
        /* Frontend (QEMU) memory and memory region information */
        struct rte_vhost_memory *mem;
        uint64_t                features;
        uint64_t                protocol_features;
        int                     vid;
        uint32_t                flags;
        uint16_t                vhost_hlen;
        /* to tell if we need to broadcast a RARP packet */
        int16_t                 broadcast_rarp;
        uint32_t                nr_vring;
        int                     async_copy;
        int                     extbuf;
        int                     linearbuf;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
        struct inflight_mem_info *inflight_info;
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
        char                    ifname[IF_NAME_SZ];
        uint64_t                log_size;
        uint64_t                log_base;
        uint64_t                log_addr;
        struct rte_ether_addr   mac;
        uint16_t                mtu;
        uint8_t                 status;

        struct vhost_device_ops const *notify_ops;

        uint32_t                nr_guest_pages;
        uint32_t                max_guest_pages;
        struct guest_page       *guest_pages;

        int                     slave_req_fd;
        rte_spinlock_t          slave_req_lock;

        int                     postcopy_ufd;
        int                     postcopy_listening;

        struct rte_vdpa_device *vdpa_dev;

        /* context data for the external message handlers */
        void                    *extern_data;
        /* pre and post vhost user message handlers for the device */
        struct rte_vhost_user_extern_ops extern_ops;
} __rte_cache_aligned;

static __rte_always_inline bool
vq_is_packed(struct virtio_net *dev)
{
        return dev->features & (1ull << VIRTIO_F_RING_PACKED);
}

static inline bool
desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
{
        uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);

        return wrap_counter == !!(flags & VRING_DESC_F_AVAIL) &&
                wrap_counter != !!(flags & VRING_DESC_F_USED);
}
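/*
 * For illustration: a descriptor the driver has made available carries an
 * AVAIL bit equal to the driver's wrap counter and a USED bit equal to its
 * inverse; once the device marks the descriptor used, both bits equal the
 * wrap counter and desc_is_avail() returns false for that same wrap_counter
 * value.
 */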

static inline void
vq_inc_last_used_packed(struct vhost_virtqueue *vq, uint16_t num)
{
        vq->last_used_idx += num;
        if (vq->last_used_idx >= vq->size) {
                vq->used_wrap_counter ^= 1;
                vq->last_used_idx -= vq->size;
        }
}

static inline void
vq_inc_last_avail_packed(struct vhost_virtqueue *vq, uint16_t num)
{
        vq->last_avail_idx += num;
        if (vq->last_avail_idx >= vq->size) {
                vq->avail_wrap_counter ^= 1;
                vq->last_avail_idx -= vq->size;
        }
}
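/*
 * Worked example: with vq->size == 256, last_used_idx == 254 and num == 4,
 * vq_inc_last_used_packed() wraps last_used_idx around to 2 and toggles
 * used_wrap_counter, matching the packed ring wrap semantics used by
 * desc_is_avail() above.
 */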

void __vhost_log_cache_write(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t addr, uint64_t len);
void __vhost_log_cache_write_iova(struct virtio_net *dev,
                struct vhost_virtqueue *vq,
                uint64_t iova, uint64_t len);
void __vhost_log_cache_sync(struct virtio_net *dev,
                struct vhost_virtqueue *vq);
void __vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len);
void __vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                            uint64_t iova, uint64_t len);

static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_write(dev, addr, len);
}

static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_cache_sync(dev, vq);
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t addr, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL)))
                __vhost_log_cache_write(dev, vq, addr, len);
}

static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t offset, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
                if (unlikely(vq->log_guest_addr == 0))
                        return;
                __vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset,
                                        len);
        }
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                     uint64_t offset, uint64_t len)
{
        if (unlikely(dev->features & (1ULL << VHOST_F_LOG_ALL))) {
                if (unlikely(vq->log_guest_addr == 0))
                        return;
                __vhost_log_write(dev, vq->log_guest_addr + offset, len);
        }
}
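/*
 * Illustrative call pattern (the index i is hypothetical): when used ring
 * entry i is updated during live migration, the touched guest memory can be
 * marked dirty with something like
 *
 *        vhost_log_used_vring(dev, vq,
 *                offsetof(struct vring_used, ring[i]),
 *                sizeof(struct vring_used_elem));
 */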

static __rte_always_inline void
vhost_log_cache_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                           uint64_t iova, uint64_t len)
{
        if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
                return;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                __vhost_log_cache_write_iova(dev, vq, iova, len);
        else
                __vhost_log_cache_write(dev, vq, iova, len);
}

static __rte_always_inline void
vhost_log_write_iova(struct virtio_net *dev, struct vhost_virtqueue *vq,
                           uint64_t iova, uint64_t len)
{
        if (likely(!(dev->features & (1ULL << VHOST_F_LOG_ALL))))
                return;

        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
                __vhost_log_write_iova(dev, vq, iova, len);
        else
                __vhost_log_write(dev, iova, len);
}

extern int vhost_config_log_level;
extern int vhost_data_log_level;

#define VHOST_LOG_CONFIG(level, fmt, args...)                   \
        rte_log(RTE_LOG_ ## level, vhost_config_log_level,      \
                "VHOST_CONFIG: " fmt, ##args)

#define VHOST_LOG_DATA(level, fmt, args...) \
        (void)((RTE_LOG_ ## level <= RTE_LOG_DP_LEVEL) ?        \
         rte_log(RTE_LOG_ ## level,  vhost_data_log_level,      \
                "VHOST_DATA : " fmt, ##args) :                  \
         0)
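/*
 * Usage examples, mirroring calls made later in this header:
 *
 *        VHOST_LOG_CONFIG(ERR, "(%d) device not found.\n", vid);
 *        VHOST_LOG_DATA(DEBUG, "%s", packet);
 *
 * VHOST_LOG_DATA() additionally drops messages more verbose than
 * RTE_LOG_DP_LEVEL at compile time.
 */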

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define PRINT_PACKET(device, addr, size, header) do { \
        char *pkt_addr = (char *)(addr); \
        unsigned int index; \
        char packet[VHOST_MAX_PRINT_BUFF]; \
        \
        if ((header)) \
                snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
        else \
                snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
        for (index = 0; index < (size); index++) { \
                snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
                        "%02hhx ", pkt_addr[index]); \
        } \
        snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
        \
        VHOST_LOG_DATA(DEBUG, "%s", packet); \
} while (0)
#else
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

#define MAX_VHOST_DEVICE        1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

#define VHOST_BINARY_SEARCH_THRESH 256

static __rte_always_inline int guest_page_addrcmp(const void *p1,
                                                const void *p2)
{
        const struct guest_page *page1 = (const struct guest_page *)p1;
        const struct guest_page *page2 = (const struct guest_page *)p2;

        if (page1->guest_phys_addr > page2->guest_phys_addr)
                return 1;
        if (page1->guest_phys_addr < page2->guest_phys_addr)
                return -1;

        return 0;
}

static __rte_always_inline rte_iova_t
gpa_to_first_hpa(struct virtio_net *dev, uint64_t gpa,
        uint64_t gpa_size, uint64_t *hpa_size)
{
        uint32_t i;
        struct guest_page *page;
        struct guest_page key;

        *hpa_size = gpa_size;
        if (dev->nr_guest_pages >= VHOST_BINARY_SEARCH_THRESH) {
                key.guest_phys_addr = gpa & ~(dev->guest_pages[0].size - 1);
                page = bsearch(&key, dev->guest_pages, dev->nr_guest_pages,
                               sizeof(struct guest_page), guest_page_addrcmp);
                if (page) {
                        if (gpa + gpa_size <=
                                        page->guest_phys_addr + page->size) {
                                return gpa - page->guest_phys_addr +
                                        page->host_phys_addr;
                        } else if (gpa < page->guest_phys_addr +
                                                page->size) {
                                *hpa_size = page->guest_phys_addr +
                                        page->size - gpa;
                                return gpa - page->guest_phys_addr +
                                        page->host_phys_addr;
                        }
                }
        } else {
                for (i = 0; i < dev->nr_guest_pages; i++) {
                        page = &dev->guest_pages[i];

                        if (gpa >= page->guest_phys_addr) {
                                if (gpa + gpa_size <=
                                        page->guest_phys_addr + page->size) {
                                        return gpa - page->guest_phys_addr +
                                                page->host_phys_addr;
                                } else if (gpa < page->guest_phys_addr +
                                                        page->size) {
                                        *hpa_size = page->guest_phys_addr +
                                                page->size - gpa;
                                        return gpa - page->guest_phys_addr +
                                                page->host_phys_addr;
                                }
                        }
                }
        }

        *hpa_size = 0;
        return 0;
}

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
        rte_iova_t hpa;
        uint64_t hpa_size;

        hpa = gpa_to_first_hpa(dev, gpa, size, &hpa_size);
        return hpa_size == size ? hpa : 0;
}
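/*
 * Note: gpa_to_hpa() only succeeds when the whole [gpa, gpa + size) range is
 * backed by one contiguous host physical region; callers that can handle
 * partial ranges use gpa_to_first_hpa() directly and check *hpa_size.
 */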

static __rte_always_inline uint64_t
hva_to_gpa(struct virtio_net *dev, uint64_t vva, uint64_t len)
{
        struct rte_vhost_mem_region *r;
        uint32_t i;

        if (unlikely(!dev || !dev->mem))
                return 0;

        for (i = 0; i < dev->mem->nregions; i++) {
                r = &dev->mem->regions[i];

                if (vva >= r->host_user_addr &&
                    vva + len < r->host_user_addr + r->size) {
                        return r->guest_phys_addr + vva - r->host_user_addr;
                }
        }
        return 0;
}

static __rte_always_inline struct virtio_net *
get_device(int vid)
{
        struct virtio_net *dev = vhost_devices[vid];

        if (unlikely(!dev)) {
                VHOST_LOG_CONFIG(ERR,
                        "(%d) device not found.\n", vid);
        }

        return dev;
}

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int);
void vhost_destroy_device_notify(struct virtio_net *dev);

void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);

void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_setup_virtio_net(int vid, bool enable, bool legacy_ol_flags);
void vhost_enable_extbuf(int vid);
void vhost_enable_linearbuf(int vid);
int vhost_enable_guest_notification(struct virtio_net *dev,
                struct vhost_virtqueue *vq, int enable);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t iova, uint64_t *len, uint8_t perm);
void *vhost_alloc_copy_ind_table(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        uint64_t desc_addr, uint64_t desc_len);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
uint64_t translate_log_addr(struct virtio_net *dev, struct vhost_virtqueue *vq,
                uint64_t log_addr);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        uint64_t iova, uint64_t *len, uint8_t perm)
{
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
                return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

        return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
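/*
 * Note: when VIRTIO_F_IOMMU_PLATFORM is not negotiated, iova is treated as a
 * guest physical address and translated through the memory table; otherwise
 * the per-virtqueue IOTLB is consulted, with perm expected to be one of the
 * VHOST_ACCESS_* permissions (RO/WO/RW).
 */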

#define vhost_avail_event(vr) \
        (*(volatile uint16_t*)&(vr)->used->ring[(vr)->size])
#define vhost_used_event(vr) \
        (*(volatile uint16_t*)&(vr)->avail->ring[(vr)->size])

/*
 * The following is used with VIRTIO_RING_F_EVENT_IDX.
 * Assuming a given event_idx value from the other side, if we have
 * just incremented the index from old to new_idx, should we trigger an
 * event?
 */
static __rte_always_inline int
vhost_need_event(uint16_t event_idx, uint16_t new_idx, uint16_t old)
{
        return (uint16_t)(new_idx - event_idx - 1) < (uint16_t)(new_idx - old);
}
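/*
 * Worked example (values chosen for illustration): with old == 10,
 * new_idx == 12 and event_idx == 11, (uint16_t)(12 - 11 - 1) == 0 is less
 * than (uint16_t)(12 - 10) == 2, so an event is needed; with event_idx == 13
 * the left-hand side becomes 65534 and no event is signalled. The unsigned
 * arithmetic keeps the comparison correct across index wrap-around.
 */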

static __rte_always_inline void
vhost_vring_call_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        /* Flush used->idx update before we read avail->flags. */
        rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

        /* Don't kick the guest if we haven't reached the index it specified. */
        if (dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX)) {
                uint16_t old = vq->signalled_used;
                uint16_t new = vq->last_used_idx;
                bool signalled_used_valid = vq->signalled_used_valid;

                vq->signalled_used = new;
                vq->signalled_used_valid = true;

                VHOST_LOG_DATA(DEBUG, "%s: used_event_idx=%d, old=%d, new=%d\n",
                        __func__,
                        vhost_used_event(vq),
                        old, new);

                if ((vhost_need_event(vhost_used_event(vq), new, old) &&
                                        (vq->callfd >= 0)) ||
                                unlikely(!signalled_used_valid)) {
                        eventfd_write(vq->callfd, (eventfd_t) 1);
                        if (dev->notify_ops->guest_notified)
                                dev->notify_ops->guest_notified(dev->vid);
                }
        } else {
                /* Kick the guest if necessary. */
                if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
                                && (vq->callfd >= 0)) {
                        eventfd_write(vq->callfd, (eventfd_t)1);
                        if (dev->notify_ops->guest_notified)
                                dev->notify_ops->guest_notified(dev->vid);
                }
        }
}

static __rte_always_inline void
vhost_vring_call_packed(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
        uint16_t old, new, off, off_wrap;
        bool signalled_used_valid, kick = false;

        /* Flush used desc update. */
        rte_atomic_thread_fence(__ATOMIC_SEQ_CST);

        if (!(dev->features & (1ULL << VIRTIO_RING_F_EVENT_IDX))) {
                if (vq->driver_event->flags !=
                                VRING_EVENT_F_DISABLE)
                        kick = true;
                goto kick;
        }

        old = vq->signalled_used;
        new = vq->last_used_idx;
        vq->signalled_used = new;
        signalled_used_valid = vq->signalled_used_valid;
        vq->signalled_used_valid = true;

        if (vq->driver_event->flags != VRING_EVENT_F_DESC) {
                if (vq->driver_event->flags != VRING_EVENT_F_DISABLE)
                        kick = true;
                goto kick;
        }

        if (unlikely(!signalled_used_valid)) {
                kick = true;
                goto kick;
        }

        rte_atomic_thread_fence(__ATOMIC_ACQUIRE);

        off_wrap = vq->driver_event->off_wrap;
        off = off_wrap & ~(1 << 15);

        if (new <= old)
                old -= vq->size;

        if (vq->used_wrap_counter != off_wrap >> 15)
                off -= vq->size;

        if (vhost_need_event(off, new, old))
                kick = true;
kick:
        if (kick) {
                eventfd_write(vq->callfd, (eventfd_t)1);
                if (dev->notify_ops->guest_notified)
                        dev->notify_ops->guest_notified(dev->vid);
        }
}

static __rte_always_inline void
free_ind_table(void *idesc)
{
        rte_free(idesc);
}

static __rte_always_inline void
restore_mbuf(struct rte_mbuf *m)
{
        uint32_t mbuf_size, priv_size;

        while (m) {
                priv_size = rte_pktmbuf_priv_size(m->pool);
                mbuf_size = sizeof(struct rte_mbuf) + priv_size;
                /* start of buffer is after mbuf structure and priv data */

                m->buf_addr = (char *)m + mbuf_size;
                m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
                m = m->next;
        }
}

static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
        while (m) {
                if (rte_mbuf_refcnt_read(m) > 1)
                        return false;
                m = m->next;
        }

        return true;
}

#endif /* _VHOST_NET_CDEV_H_ */