qemu/hw/virtio/vhost-vdpa.c
/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qemu-common.h"

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
{
    return (!memory_region_is_ram(section->mr) &&
            !memory_region_is_iommu(section->mr)) ||
           /* vhost-vDPA doesn't allow MMIO to be mapped */
           memory_region_is_ram_device(section->mr) ||
           /*
            * Sizing an enabled 64-bit BAR can cause spurious mappings to
            * addresses in the upper part of the 64-bit address space.  These
            * are never accessed by the CPU and are beyond the address width
            * of some IOMMU hardware.  TODO: VDPA should tell us the IOMMU
            * width.
            */
           section->offset_within_address_space & (1ULL << 63);
}

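/*
 * Map the host-virtual range [@vaddr, @vaddr + @size) at IOVA @iova in the
 * device IOTLB by writing a VHOST_IOTLB_UPDATE message to the vhost-vdpa fd.
 */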
static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
                              void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
                             msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

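/*
 * Drop the IOTLB mapping covering [@iova, @iova + @size) by writing a
 * VHOST_IOTLB_INVALIDATE message to the vhost-vdpa fd.
 */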
static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
                                hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

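/*
 * If the backend advertises VHOST_BACKEND_F_IOTLB_BATCH, open an IOTLB batch
 * so that the map/unmap requests issued during this memory listener
 * transaction are applied together at commit time.
 */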
static void vhost_vdpa_listener_begin(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

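/* Close the IOTLB batch opened in vhost_vdpa_listener_begin(). */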
static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

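/*
 * Memory listener hook: page-align a newly added RAM section, translate it
 * to its host virtual address and install the mapping in the device IOTLB.
 */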
static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr) == true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
                             vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail;
    }

    return;

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  At runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;
}

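/*
 * Memory listener hook: remove the IOTLB mapping of a RAM section that is
 * going away, mirroring the alignment done in region_add.
 */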
static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}

/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .begin = vhost_vdpa_listener_begin,
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};

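/* Issue a vhost ioctl on the vdpa device fd; returns -errno on failure. */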
static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                           void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

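/*
 * OR @status into the current device status bits using a
 * VHOST_VDPA_GET_STATUS / VHOST_VDPA_SET_STATUS read-modify-write.
 */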
static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;

    trace_vhost_vdpa_add_status(dev, status);
    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
        return;
    }

    s |= status;

    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
}

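/*
 * Backend init: link the vhost_vdpa state with the vhost device, install the
 * memory listener template and advance the device status to
 * ACKNOWLEDGE | DRIVER.
 */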
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

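/* Detach and unmap the host notifier (doorbell) page of @queue_index. */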
static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

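/*
 * mmap the per-queue notification (doorbell) page exported by the vhost-vdpa
 * device and expose it to the guest as a host notifier memory region, so that
 * queue kicks reach the device without trapping into QEMU.
 */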
static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size;
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

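/*
 * Set up host notifiers for every queue owned by this vhost device; on the
 * first failure, tear down the ones already initialized.
 */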
static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i);
    return;
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);

    dev->opaque = NULL;
    return 0;
}

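/*
 * There is no memory slot limit for vDPA: guest memory is mapped through the
 * IOTLB API rather than through a fixed-size memory table.
 */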
static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

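/*
 * The memory table itself is not passed to the device; mappings are installed
 * incrementally by the memory listener via the IOTLB API. This hook only
 * traces the regions and rejects a non-zero padding field.
 */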
static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -1;
    }

    return 0;
}

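/*
 * Set the device features, then set FEATURES_OK and read the status back to
 * verify that the device accepted the negotiated feature set.
 */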
static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    uint8_t status = 0;
    int ret;

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }
    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
}

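/*
 * Negotiate the backend capabilities (IOTLB message v2 and IOTLB batching).
 * Errors are not propagated: dev->backend_cap is simply left unset and the
 * device runs without the optional capabilities.
 */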
static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return 0;
    }

    features &= f;
    r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
    if (r) {
        return 0;
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

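/* Writing a status of zero resets the device. */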
static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    int ret;
    uint8_t status = 0;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx - dev->vq_index);
    return idx - dev->vq_index;
}

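/* Enable (num = 1) every vring owned by this vhost device. */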
static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;

    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

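/* Trace the device config space as a hex dump, 16 bytes per line. */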
static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

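/*
 * Write @size bytes of config space at @offset by marshalling the data into a
 * struct vhost_vdpa_config and issuing VHOST_VDPA_SET_CONFIG.
 */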
static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

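/* Read @config_len bytes of device config space via VHOST_VDPA_GET_CONFIG. */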
static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

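/*
 * Start/stop the device. On start: register the memory listener (which
 * populates the device IOTLB), map the host notifiers, enable the vrings and
 * set DRIVER_OK, then check that the device acknowledged it. On stop: reset
 * the device, restore ACKNOWLEDGE | DRIVER and undo the rest.
 */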
static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
{
    struct vhost_vdpa *v = dev->opaque;

    trace_vhost_vdpa_dev_start(dev, started);
    if (started) {
        uint8_t status = 0;

        memory_listener_register(&v->listener, &address_space_memory);
        vhost_vdpa_host_notifiers_init(dev);
        vhost_vdpa_set_vring_ready(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);

        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
    } else {
        vhost_vdpa_reset_device(dev);
        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                                   VIRTIO_CONFIG_S_DRIVER);
        vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
        memory_listener_unregister(&v->listener);

        return 0;
    }
}

static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
                                  log->log);
    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
}

static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                    addr->desc_user_addr, addr->used_user_addr,
                                    addr->avail_user_addr,
                                    addr->log_guest_addr);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
}

static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
}

static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
    trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
    return ret;
}

static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_get_features(struct vhost_dev *dev,
                                   uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static int vhost_vdpa_set_owner(struct vhost_dev *dev)
{
    trace_vhost_vdpa_set_owner(dev);
    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
}

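/*
 * vDPA devices access the vrings through the guest physical / IOVA space, so
 * report the vring's guest physical addresses rather than the host virtual
 * addresses used by other vhost backends.
 */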
static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
                                  struct vhost_vring_addr *addr,
                                  struct vhost_virtqueue *vq)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
    trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
                                 addr->avail_user_addr, addr->used_user_addr);
    return 0;
}

static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
{
    return true;
}

const VhostOps vdpa_ops = {
        .backend_type = VHOST_BACKEND_TYPE_VDPA,
        .vhost_backend_init = vhost_vdpa_init,
        .vhost_backend_cleanup = vhost_vdpa_cleanup,
        .vhost_set_log_base = vhost_vdpa_set_log_base,
        .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
        .vhost_set_vring_num = vhost_vdpa_set_vring_num,
        .vhost_set_vring_base = vhost_vdpa_set_vring_base,
        .vhost_get_vring_base = vhost_vdpa_get_vring_base,
        .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
        .vhost_set_vring_call = vhost_vdpa_set_vring_call,
        .vhost_get_features = vhost_vdpa_get_features,
        .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
        .vhost_set_owner = vhost_vdpa_set_owner,
        .vhost_set_vring_endian = NULL,
        .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
        .vhost_set_mem_table = vhost_vdpa_set_mem_table,
        .vhost_set_features = vhost_vdpa_set_features,
        .vhost_reset_device = vhost_vdpa_reset_device,
        .vhost_get_vq_index = vhost_vdpa_get_vq_index,
        .vhost_get_config = vhost_vdpa_get_config,
        .vhost_set_config = vhost_vdpa_set_config,
        .vhost_requires_shm_log = NULL,
        .vhost_migration_done = NULL,
        .vhost_backend_can_merge = NULL,
        .vhost_net_set_mtu = NULL,
        .vhost_set_iotlb_callback = NULL,
        .vhost_send_device_iotlb_msg = NULL,
        .vhost_dev_start = vhost_vdpa_dev_start,
        .vhost_get_device_id = vhost_vdpa_get_device_id,
        .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
        .vhost_force_iommu = vhost_vdpa_force_iommu,
};