qemu/net/vhost-vdpa.c
   1/*
   2 * vhost-vdpa.c
   3 *
   4 * Copyright(c) 2017-2018 Intel Corporation.
   5 * Copyright(c) 2020 Red Hat, Inc.
   6 *
   7 * This work is licensed under the terms of the GNU GPL, version 2 or later.
   8 * See the COPYING file in the top-level directory.
   9 *
  10 */
  11
  12#include "qemu/osdep.h"
  13#include "clients.h"
  14#include "hw/virtio/virtio-net.h"
  15#include "net/vhost_net.h"
  16#include "net/vhost-vdpa.h"
  17#include "hw/virtio/vhost-vdpa.h"
  18#include "qemu/config-file.h"
  19#include "qemu/error-report.h"
  20#include "qemu/log.h"
  21#include "qemu/memalign.h"
  22#include "qemu/option.h"
  23#include "qapi/error.h"
  24#include <linux/vhost.h>
  25#include <sys/ioctl.h>
  26#include <err.h>
  27#include "standard-headers/linux/virtio_net.h"
  28#include "monitor/monitor.h"
  29#include "migration/migration.h"
  30#include "migration/misc.h"
  31#include "hw/virtio/vhost.h"
  32
/* TODO: need to add multiqueue support here */
  34typedef struct VhostVDPAState {
  35    NetClientState nc;
  36    struct vhost_vdpa vhost_vdpa;
  37    Notifier migration_state;
  38    VHostNetState *vhost_net;
  39
  40    /* Control commands shadow buffers */
  41    void *cvq_cmd_out_buffer;
  42    virtio_net_ctrl_ack *status;
  43
    /* The device always has SVQ enabled */
  45    bool always_svq;
  46
  47    /* The device can isolate CVQ in its own ASID */
  48    bool cvq_isolated;
  49
  50    bool started;
  51} VhostVDPAState;
  52
  53/*
  54 * The array is sorted alphabetically in ascending order,
  55 * with the exception of VHOST_INVALID_FEATURE_BIT,
  56 * which should always be the last entry.
  57 */
  58const int vdpa_feature_bits[] = {
  59    VIRTIO_F_ANY_LAYOUT,
  60    VIRTIO_F_IOMMU_PLATFORM,
  61    VIRTIO_F_NOTIFY_ON_EMPTY,
  62    VIRTIO_F_RING_PACKED,
  63    VIRTIO_F_RING_RESET,
  64    VIRTIO_F_VERSION_1,
  65    VIRTIO_NET_F_CSUM,
  66    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
  67    VIRTIO_NET_F_CTRL_MAC_ADDR,
  68    VIRTIO_NET_F_CTRL_RX,
  69    VIRTIO_NET_F_CTRL_RX_EXTRA,
  70    VIRTIO_NET_F_CTRL_VLAN,
  71    VIRTIO_NET_F_CTRL_VQ,
  72    VIRTIO_NET_F_GSO,
  73    VIRTIO_NET_F_GUEST_CSUM,
  74    VIRTIO_NET_F_GUEST_ECN,
  75    VIRTIO_NET_F_GUEST_TSO4,
  76    VIRTIO_NET_F_GUEST_TSO6,
  77    VIRTIO_NET_F_GUEST_UFO,
  78    VIRTIO_NET_F_HASH_REPORT,
  79    VIRTIO_NET_F_HOST_ECN,
  80    VIRTIO_NET_F_HOST_TSO4,
  81    VIRTIO_NET_F_HOST_TSO6,
  82    VIRTIO_NET_F_HOST_UFO,
  83    VIRTIO_NET_F_MQ,
  84    VIRTIO_NET_F_MRG_RXBUF,
  85    VIRTIO_NET_F_MTU,
  86    VIRTIO_NET_F_RSS,
  87    VIRTIO_NET_F_STATUS,
  88    VIRTIO_RING_F_EVENT_IDX,
  89    VIRTIO_RING_F_INDIRECT_DESC,
  90
  91    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
  92    VHOST_INVALID_FEATURE_BIT
  93};
  94
/** Supported device-specific feature bits with SVQ */
  96static const uint64_t vdpa_svq_device_features =
  97    BIT_ULL(VIRTIO_NET_F_CSUM) |
  98    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
  99    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
 100    BIT_ULL(VIRTIO_NET_F_MTU) |
 101    BIT_ULL(VIRTIO_NET_F_MAC) |
 102    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
 103    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
 104    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
 105    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
 106    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
 107    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
 108    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
 109    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
 110    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
 111    BIT_ULL(VIRTIO_NET_F_STATUS) |
 112    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
 113    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
 114    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
 115    BIT_ULL(VIRTIO_NET_F_MQ) |
 116    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
 117    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
 118    /* VHOST_F_LOG_ALL is exposed by SVQ */
 119    BIT_ULL(VHOST_F_LOG_ALL) |
 120    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
 121    BIT_ULL(VIRTIO_NET_F_STANDBY) |
 122    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);
 123
 124#define VHOST_VDPA_NET_CVQ_ASID 1
 125
 126VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
 127{
 128    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 129    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 130    return s->vhost_net;
 131}
 132
 133static size_t vhost_vdpa_net_cvq_cmd_len(void)
 134{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
 139    return sizeof(struct virtio_net_ctrl_hdr) +
 140           2 * sizeof(struct virtio_net_ctrl_mac) +
 141           MAC_TABLE_ENTRIES * ETH_ALEN;
 142}
 143
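/**
 * Length of the shadow CVQ buffers: the worst case command length, rounded
 * up to the host page size since the buffers are mmap()ed and DMA mapped
 * with this size.
 */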
 144static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
 145{
 146    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
 147}
 148
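/**
 * Check that the device-specific features in @features can be emulated by
 * SVQ and that the ring layout features are acceptable for it.
 */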
 149static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
 150{
 151    uint64_t invalid_dev_features =
 152        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
 154        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
 155                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);
 156
 157    if (invalid_dev_features) {
 158        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
 159                   invalid_dev_features);
 160        return false;
 161    }
 162
 163    return vhost_svq_valid_features(features, errp);
 164}
 165
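/** Check that the vhost-vdpa device is actually a virtio-net device */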
 166static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
 167{
 168    uint32_t device_id;
 169    int ret;
 170    struct vhost_dev *hdev;
 171
 172    hdev = (struct vhost_dev *)&net->dev;
 173    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
 174    if (device_id != VIRTIO_ID_NET) {
 175        return -ENOTSUP;
 176    }
 177    return ret;
 178}
 179
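/**
 * Initialize the vhost_net backend of a vdpa net client and check that the
 * underlying device is a network device.
 */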
 180static int vhost_vdpa_add(NetClientState *ncs, void *be,
 181                          int queue_pair_index, int nvqs)
 182{
 183    VhostNetOptions options;
 184    struct vhost_net *net = NULL;
 185    VhostVDPAState *s;
 186    int ret;
 187
 188    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
 189    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 190    s = DO_UPCAST(VhostVDPAState, nc, ncs);
 191    options.net_backend = ncs;
 192    options.opaque      = be;
 193    options.busyloop_timeout = 0;
 194    options.nvqs = nvqs;
 195
 196    net = vhost_net_init(&options);
 197    if (!net) {
 198        error_report("failed to init vhost_net for queue");
 199        goto err_init;
 200    }
 201    s->vhost_net = net;
 202    ret = vhost_vdpa_net_check_device_id(net);
 203    if (ret) {
 204        goto err_check;
 205    }
 206    return 0;
 207err_check:
 208    vhost_net_cleanup(net);
 209    g_free(net);
 210err_init:
 211    return -1;
 212}
 213
 214static void vhost_vdpa_cleanup(NetClientState *nc)
 215{
 216    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 217
 218    /*
     * If a peer NIC is attached, do not clean up anything.
 220     * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup()
 221     * when the guest is shutting down.
 222     */
 223    if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) {
 224        return;
 225    }
 226    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
 227    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
 228    if (s->vhost_net) {
 229        vhost_net_cleanup(s->vhost_net);
 230        g_free(s->vhost_net);
 231        s->vhost_net = NULL;
 232    }
    if (s->vhost_vdpa.device_fd >= 0) {
 234        qemu_close(s->vhost_vdpa.device_fd);
 235        s->vhost_vdpa.device_fd = -1;
 236    }
 237}
 238
 239static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
 240{
 241    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 242
 243    return true;
 244}
 245
 246static bool vhost_vdpa_has_ufo(NetClientState *nc)
 247{
 248    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 249    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 250    uint64_t features = 0;
 251    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
 252    features = vhost_net_get_features(s->vhost_net, features);
 253    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
 254
 255}
 256
 257static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
 258                                       Error **errp)
 259{
 260    const char *driver = object_class_get_name(oc);
 261
 262    if (!g_str_has_prefix(driver, "virtio-net-")) {
 263        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
 264        return false;
 265    }
 266
 267    return true;
 268}
 269
 270/** Dummy receive in case qemu falls back to userland tap networking */
 271static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
 272                                  size_t size)
 273{
 274    return size;
 275}
 276
 277/** From any vdpa net client, get the netclient of the first queue pair */
 278static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
 279{
 280    NICState *nic = qemu_get_nic(s->nc.peer);
 281    NetClientState *nc0 = qemu_get_peer(nic->ncs, 0);
 282
 283    return DO_UPCAST(VhostVDPAState, nc, nc0);
 284}
 285
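/**
 * Stop and restart vhost-net so that SVQ (and with it the emulation of
 * dirty memory logging) is enabled or disabled to match the migration state.
 */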
 286static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
 287{
 288    struct vhost_vdpa *v = &s->vhost_vdpa;
 289    VirtIONet *n;
 290    VirtIODevice *vdev;
 291    int data_queue_pairs, cvq, r;
 292
 293    /* We are only called on the first data vqs and only if x-svq is not set */
 294    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
 295        return;
 296    }
 297
 298    vdev = v->dev->vdev;
 299    n = VIRTIO_NET(vdev);
 300    if (!n->vhost_started) {
 301        return;
 302    }
 303
 304    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
 305    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
 306                                  n->max_ncs - n->max_queue_pairs : 0;
 307    /*
 308     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
 309     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
 311     */
 312    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);
 313
    /* vhost_net_start() checks migration setup_or_active to decide on SVQ */
 315    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
 316    if (unlikely(r < 0)) {
 317        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
 318    }
 319}
 320
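/**
 * Enable SVQ when migration enters the setup state, and disable it again if
 * the migration fails.
 */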
 321static void vdpa_net_migration_state_notifier(Notifier *notifier, void *data)
 322{
 323    MigrationState *migration = data;
 324    VhostVDPAState *s = container_of(notifier, VhostVDPAState,
 325                                     migration_state);
 326
 327    if (migration_in_setup(migration)) {
 328        vhost_vdpa_net_log_global_enable(s, true);
 329    } else if (migration_has_failed(migration)) {
 330        vhost_vdpa_net_log_global_enable(s, false);
 331    }
 332}
 333
 334static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
 335{
 336    struct vhost_vdpa *v = &s->vhost_vdpa;
 337
 338    add_migration_state_change_notifier(&s->migration_state);
 339    if (v->shadow_vqs_enabled) {
 340        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
 341                                           v->iova_range.last);
 342    }
 343}
 344
 345static int vhost_vdpa_net_data_start(NetClientState *nc)
 346{
 347    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 348    struct vhost_vdpa *v = &s->vhost_vdpa;
 349
 350    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 351
 352    if (s->always_svq ||
 353        migration_is_setup_or_active(migrate_get_current()->state)) {
 354        v->shadow_vqs_enabled = true;
 355        v->shadow_data = true;
 356    } else {
 357        v->shadow_vqs_enabled = false;
 358        v->shadow_data = false;
 359    }
 360
 361    if (v->index == 0) {
 362        vhost_vdpa_net_data_start_first(s);
 363        return 0;
 364    }
 365
 366    if (v->shadow_vqs_enabled) {
 367        VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
 368        v->iova_tree = s0->vhost_vdpa.iova_tree;
 369    }
 370
 371    return 0;
 372}
 373
 374static void vhost_vdpa_net_client_stop(NetClientState *nc)
 375{
 376    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 377    struct vhost_dev *dev;
 378
 379    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 380
 381    if (s->vhost_vdpa.index == 0) {
 382        remove_migration_state_change_notifier(&s->migration_state);
 383    }
 384
 385    dev = s->vhost_vdpa.dev;
 386    if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
 387        g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
 388    }
 389}
 390
 391static NetClientInfo net_vhost_vdpa_info = {
 392        .type = NET_CLIENT_DRIVER_VHOST_VDPA,
 393        .size = sizeof(VhostVDPAState),
 394        .receive = vhost_vdpa_receive,
 395        .start = vhost_vdpa_net_data_start,
 396        .stop = vhost_vdpa_net_client_stop,
 397        .cleanup = vhost_vdpa_cleanup,
 398        .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
 399        .has_ufo = vhost_vdpa_has_ufo,
 400        .check_peer_type = vhost_vdpa_check_peer_type,
 401};
 402
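/** Get the group of a virtqueue, or a negative errno in case of error */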
 403static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
 404                                          Error **errp)
 405{
 406    struct vhost_vring_state state = {
 407        .index = vq_index,
 408    };
 409    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);
 410
 411    if (unlikely(r < 0)) {
 412        r = -errno;
 413        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
 414        return r;
 415    }
 416
 417    return state.num;
 418}
 419
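/** Set the address space id @asid_num for the virtqueue group @vq_group */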
 420static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
 421                                           unsigned vq_group,
 422                                           unsigned asid_num)
 423{
 424    struct vhost_vring_state asid = {
 425        .index = vq_group,
 426        .num = asid_num,
 427    };
 428    int r;
 429
 430    r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
 431    if (unlikely(r < 0)) {
 432        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
 433                     asid.index, asid.num, errno, g_strerror(errno));
 434    }
 435    return r;
 436}
 437
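/** Unmap a shadow CVQ buffer from the device and release its IOVA range */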
 438static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
 439{
 440    VhostIOVATree *tree = v->iova_tree;
 441    DMAMap needle = {
 442        /*
 443         * No need to specify size or to look for more translations since
 444         * this contiguous chunk was allocated by us.
 445         */
 446        .translated_addr = (hwaddr)(uintptr_t)addr,
 447    };
 448    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
 449    int r;
 450
 451    if (unlikely(!map)) {
 452        error_report("Cannot locate expected map");
 453        return;
 454    }
 455
 456    r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1);
 457    if (unlikely(r != 0)) {
 458        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
 459    }
 460
 461    vhost_iova_tree_remove(tree, *map);
 462}
 463
 464/** Map CVQ buffer. */
 465static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
 466                                  bool write)
 467{
 468    DMAMap map = {};
 469    int r;
 470
 471    map.translated_addr = (hwaddr)(uintptr_t)buf;
 472    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
 474    r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
 475    if (unlikely(r != IOVA_OK)) {
 476        error_report("Cannot map injected element");
 477        return r;
 478    }
 479
 480    r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova,
 481                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
 482    if (unlikely(r < 0)) {
 483        goto dma_map_err;
 484    }
 485
 486    return 0;
 487
 488dma_map_err:
 489    vhost_iova_tree_remove(v->iova_tree, map);
 490    return r;
 491}
 492
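/**
 * Prepare the control virtqueue for the device start: decide whether CVQ
 * must be shadowed, move it to its own address space if it is isolated, and
 * map the shadow command buffers into the device.
 */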
 493static int vhost_vdpa_net_cvq_start(NetClientState *nc)
 494{
 495    VhostVDPAState *s, *s0;
 496    struct vhost_vdpa *v;
 497    int64_t cvq_group;
 498    int r;
 499    Error *err = NULL;
 500
 501    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 502
 503    s = DO_UPCAST(VhostVDPAState, nc, nc);
 504    v = &s->vhost_vdpa;
 505
 506    s0 = vhost_vdpa_net_first_nc_vdpa(s);
 507    v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
 508    v->shadow_vqs_enabled = s->always_svq;
 509    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
 510
 511    if (s->vhost_vdpa.shadow_data) {
 512        /* SVQ is already configured for all virtqueues */
 513        goto out;
 514    }
 515
    /*
     * If we return early in these cases, SVQ will not be enabled. The
     * migration will be blocked as long as vhost-vdpa backends do not
     * offer _F_LOG.
     */
 520    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
 521        return 0;
 522    }
 523
 524    if (!s->cvq_isolated) {
 525        return 0;
 526    }
 527
 528    cvq_group = vhost_vdpa_get_vring_group(v->device_fd,
 529                                           v->dev->vq_index_end - 1,
 530                                           &err);
 531    if (unlikely(cvq_group < 0)) {
 532        error_report_err(err);
 533        return cvq_group;
 534    }
 535
 536    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
 537    if (unlikely(r < 0)) {
 538        return r;
 539    }
 540
 541    v->shadow_vqs_enabled = true;
 542    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;
 543
 544out:
 545    if (!s->vhost_vdpa.shadow_vqs_enabled) {
 546        return 0;
 547    }
 548
 549    if (s0->vhost_vdpa.iova_tree) {
        /*
         * SVQ is already configured for all virtqueues.  Reuse the IOVA tree
         * for simplicity, whether CVQ shares the ASID with the guest or not,
         * because:
         * - The memory listener needs access to the guest's memory addresses
         *   allocated in the IOVA tree.
         * - There should be plenty of IOVA address space for both ASIDs, so
         *   there is no need to worry about collisions between them.  The
         *   guest's translations are still validated with virtio
         *   virtqueue_pop, so there is no risk of the guest accessing memory
         *   that it shouldn't.
         *
         * Allocating an IOVA tree per ASID is doable, but it complicates the
         * code and is not worth it for the moment.
         */
 563        v->iova_tree = s0->vhost_vdpa.iova_tree;
 564    } else {
 565        v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
 566                                           v->iova_range.last);
 567    }
 568
 569    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
 570                               vhost_vdpa_net_cvq_cmd_page_len(), false);
 571    if (unlikely(r < 0)) {
 572        return r;
 573    }
 574
 575    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
 576                               vhost_vdpa_net_cvq_cmd_page_len(), true);
 577    if (unlikely(r < 0)) {
 578        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
 579    }
 580
 581    return r;
 582}
 583
 584static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
 585{
 586    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 587
 588    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 589
 590    if (s->vhost_vdpa.shadow_vqs_enabled) {
 591        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
 592        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
 593    }
 594
 595    vhost_vdpa_net_client_stop(nc);
 596}
 597
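/**
 * Add the already prepared out/in shadow buffers to the shadow CVQ and poll
 * for the device answer. Returns the number of bytes written by the device
 * or a negative error code.
 */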
 598static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
 599                                      size_t in_len)
 600{
 601    /* Buffers for the device */
 602    const struct iovec out = {
 603        .iov_base = s->cvq_cmd_out_buffer,
 604        .iov_len = out_len,
 605    };
 606    const struct iovec in = {
 607        .iov_base = s->status,
 608        .iov_len = sizeof(virtio_net_ctrl_ack),
 609    };
 610    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
 611    int r;
 612
 613    r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
 614    if (unlikely(r != 0)) {
 615        if (unlikely(r == -ENOSPC)) {
 616            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
 617                          __func__);
 618        }
 619        return r;
 620    }
 621
    /*
     * We can poll here since we've held the BQL since we sent the
     * descriptor. Also, we need to take the answer before SVQ pulls it by
     * itself, when the BQL is released.
     */
 627    return vhost_svq_poll(svq);
 628}
 629
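/**
 * Compose a control command, header plus command-specific data, in the
 * shadow out buffer and send it to the device.
 */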
 630static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
 631                                       uint8_t cmd, const struct iovec *data_sg,
 632                                       size_t data_num)
 633{
 634    const struct virtio_net_ctrl_hdr ctrl = {
 635        .class = class,
 636        .cmd = cmd,
 637    };
 638    size_t data_size = iov_size(data_sg, data_num);
 639
 640    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
 641
 642    /* pack the CVQ command header */
 643    memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl));
 644
    /* pack the CVQ command-specific data */
 646    iov_to_buf(data_sg, data_num, 0,
 647               s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
 648
 649    return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
 650                                  sizeof(virtio_net_ctrl_ack));
 651}
 652
 653static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
 654{
 655    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
 656        const struct iovec data = {
 657            .iov_base = (void *)n->mac,
 658            .iov_len = sizeof(n->mac),
 659        };
 660        ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC,
 661                                                  VIRTIO_NET_CTRL_MAC_ADDR_SET,
 662                                                  &data, 1);
 663        if (unlikely(dev_written < 0)) {
 664            return dev_written;
 665        }
 666        if (*s->status != VIRTIO_NET_OK) {
 667            return -EIO;
 668        }
 669    }
 670
 671    /*
 672     * According to VirtIO standard, "The device MUST have an
 673     * empty MAC filtering table on reset.".
 674     *
 675     * Therefore, there is no need to send this CVQ command if the
 676     * driver also sets an empty MAC filter table, which aligns with
 677     * the device's defaults.
 678     *
 679     * Note that the device's defaults can mismatch the driver's
 680     * configuration only at live migration.
 681     */
 682    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
 683        n->mac_table.in_use == 0) {
 684        return 0;
 685    }
 686
 687    uint32_t uni_entries = n->mac_table.first_multi,
 688             uni_macs_size = uni_entries * ETH_ALEN,
 689             mul_entries = n->mac_table.in_use - uni_entries,
 690             mul_macs_size = mul_entries * ETH_ALEN;
 691    struct virtio_net_ctrl_mac uni = {
 692        .entries = cpu_to_le32(uni_entries),
 693    };
 694    struct virtio_net_ctrl_mac mul = {
 695        .entries = cpu_to_le32(mul_entries),
 696    };
 697    const struct iovec data[] = {
 698        {
 699            .iov_base = &uni,
 700            .iov_len = sizeof(uni),
 701        }, {
 702            .iov_base = n->mac_table.macs,
 703            .iov_len = uni_macs_size,
 704        }, {
 705            .iov_base = &mul,
 706            .iov_len = sizeof(mul),
 707        }, {
 708            .iov_base = &n->mac_table.macs[uni_macs_size],
 709            .iov_len = mul_macs_size,
 710        },
 711    };
 712    ssize_t dev_written = vhost_vdpa_net_load_cmd(s,
 713                                VIRTIO_NET_CTRL_MAC,
 714                                VIRTIO_NET_CTRL_MAC_TABLE_SET,
 715                                data, ARRAY_SIZE(data));
 716    if (unlikely(dev_written < 0)) {
 717        return dev_written;
 718    }
 719    if (*s->status != VIRTIO_NET_OK) {
 720        return -EIO;
 721    }
 722
 723    return 0;
 724}
 725
 726static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
 727                                  const VirtIONet *n)
 728{
 729    struct virtio_net_ctrl_mq mq;
 730    ssize_t dev_written;
 731
 732    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
 733        return 0;
 734    }
 735
 736    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
 737    const struct iovec data = {
 738        .iov_base = &mq,
 739        .iov_len = sizeof(mq),
 740    };
 741    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ,
 742                                          VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
 743                                          &data, 1);
 744    if (unlikely(dev_written < 0)) {
 745        return dev_written;
 746    }
 747    if (*s->status != VIRTIO_NET_OK) {
 748        return -EIO;
 749    }
 750
 751    return 0;
 752}
 753
 754static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
 755                                        const VirtIONet *n)
 756{
 757    uint64_t offloads;
 758    ssize_t dev_written;
 759
 760    if (!virtio_vdev_has_feature(&n->parent_obj,
 761                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
 762        return 0;
 763    }
 764
 765    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
 766        /*
 767         * According to VirtIO standard, "Upon feature negotiation
 768         * corresponding offload gets enabled to preserve
 769         * backward compatibility.".
 770         *
 771         * Therefore, there is no need to send this CVQ command if the
 772         * driver also enables all supported offloads, which aligns with
 773         * the device's defaults.
 774         *
 775         * Note that the device's defaults can mismatch the driver's
 776         * configuration only at live migration.
 777         */
 778        return 0;
 779    }
 780
 781    offloads = cpu_to_le64(n->curr_guest_offloads);
 782    const struct iovec data = {
 783        .iov_base = &offloads,
 784        .iov_len = sizeof(offloads),
 785    };
 786    dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
 787                                          VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
 788                                          &data, 1);
 789    if (unlikely(dev_written < 0)) {
 790        return dev_written;
 791    }
 792    if (*s->status != VIRTIO_NET_OK) {
 793        return -EIO;
 794    }
 795
 796    return 0;
 797}
 798
 799static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
 800                                       uint8_t cmd,
 801                                       uint8_t on)
 802{
 803    const struct iovec data = {
 804        .iov_base = &on,
 805        .iov_len = sizeof(on),
 806    };
 807    return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
 808                                   cmd, &data, 1);
 809}
 810
 811static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
 812                                  const VirtIONet *n)
 813{
 814    ssize_t dev_written;
 815
 816    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
 817        return 0;
 818    }
 819
 820    /*
 821     * According to virtio_net_reset(), device turns promiscuous mode
 822     * on by default.
 823     *
     * Additionally, according to VirtIO standard, "Since there are
 825     * no guarantees, it can use a hash filter or silently switch to
 826     * allmulti or promiscuous mode if it is given too many addresses.".
 827     * QEMU marks `n->mac_table.uni_overflow` if guest sets too many
 828     * non-multicast MAC addresses, indicating that promiscuous mode
 829     * should be enabled.
 830     *
 831     * Therefore, QEMU should only send this CVQ command if the
 832     * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off,
     * which sets promiscuous mode off, different from the device's defaults.
 834     *
 835     * Note that the device's defaults can mismatch the driver's
 836     * configuration only at live migration.
 837     */
 838    if (!n->mac_table.uni_overflow && !n->promisc) {
 839        dev_written = vhost_vdpa_net_load_rx_mode(s,
 840                                            VIRTIO_NET_CTRL_RX_PROMISC, 0);
 841        if (unlikely(dev_written < 0)) {
 842            return dev_written;
 843        }
 844        if (*s->status != VIRTIO_NET_OK) {
 845            return -EIO;
 846        }
 847    }
 848
 849    /*
 850     * According to virtio_net_reset(), device turns all-multicast mode
 851     * off by default.
 852     *
 853     * According to VirtIO standard, "Since there are no guarantees,
 854     * it can use a hash filter or silently switch to allmulti or
 855     * promiscuous mode if it is given too many addresses.". QEMU marks
 856     * `n->mac_table.multi_overflow` if guest sets too many
     * multicast MAC addresses.
 858     *
 859     * Therefore, QEMU should only send this CVQ command if the
 860     * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on,
 861     * which sets all-multicast mode on, different from the device's defaults.
 862     *
 863     * Note that the device's defaults can mismatch the driver's
 864     * configuration only at live migration.
 865     */
 866    if (n->mac_table.multi_overflow || n->allmulti) {
 867        dev_written = vhost_vdpa_net_load_rx_mode(s,
 868                                            VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
 869        if (unlikely(dev_written < 0)) {
 870            return dev_written;
 871        }
 872        if (*s->status != VIRTIO_NET_OK) {
 873            return -EIO;
 874        }
 875    }
 876
 877    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
 878        return 0;
 879    }
 880
 881    /*
 882     * According to virtio_net_reset(), device turns all-unicast mode
 883     * off by default.
 884     *
 885     * Therefore, QEMU should only send this CVQ command if the driver
 886     * sets all-unicast mode on, different from the device's defaults.
 887     *
 888     * Note that the device's defaults can mismatch the driver's
 889     * configuration only at live migration.
 890     */
 891    if (n->alluni) {
 892        dev_written = vhost_vdpa_net_load_rx_mode(s,
 893                                            VIRTIO_NET_CTRL_RX_ALLUNI, 1);
 894        if (dev_written < 0) {
 895            return dev_written;
 896        }
 897        if (*s->status != VIRTIO_NET_OK) {
 898            return -EIO;
 899        }
 900    }
 901
 902    /*
 903     * According to virtio_net_reset(), device turns non-multicast mode
 904     * off by default.
 905     *
 906     * Therefore, QEMU should only send this CVQ command if the driver
 907     * sets non-multicast mode on, different from the device's defaults.
 908     *
 909     * Note that the device's defaults can mismatch the driver's
 910     * configuration only at live migration.
 911     */
 912    if (n->nomulti) {
 913        dev_written = vhost_vdpa_net_load_rx_mode(s,
 914                                            VIRTIO_NET_CTRL_RX_NOMULTI, 1);
 915        if (dev_written < 0) {
 916            return dev_written;
 917        }
 918        if (*s->status != VIRTIO_NET_OK) {
 919            return -EIO;
 920        }
 921    }
 922
 923    /*
 924     * According to virtio_net_reset(), device turns non-unicast mode
 925     * off by default.
 926     *
 927     * Therefore, QEMU should only send this CVQ command if the driver
 928     * sets non-unicast mode on, different from the device's defaults.
 929     *
 930     * Note that the device's defaults can mismatch the driver's
 931     * configuration only at live migration.
 932     */
 933    if (n->nouni) {
 934        dev_written = vhost_vdpa_net_load_rx_mode(s,
 935                                            VIRTIO_NET_CTRL_RX_NOUNI, 1);
 936        if (dev_written < 0) {
 937            return dev_written;
 938        }
 939        if (*s->status != VIRTIO_NET_OK) {
 940            return -EIO;
 941        }
 942    }
 943
 944    /*
 945     * According to virtio_net_reset(), device turns non-broadcast mode
 946     * off by default.
 947     *
 948     * Therefore, QEMU should only send this CVQ command if the driver
 949     * sets non-broadcast mode on, different from the device's defaults.
 950     *
 951     * Note that the device's defaults can mismatch the driver's
 952     * configuration only at live migration.
 953     */
 954    if (n->nobcast) {
 955        dev_written = vhost_vdpa_net_load_rx_mode(s,
 956                                            VIRTIO_NET_CTRL_RX_NOBCAST, 1);
 957        if (dev_written < 0) {
 958            return dev_written;
 959        }
 960        if (*s->status != VIRTIO_NET_OK) {
 961            return -EIO;
 962        }
 963    }
 964
 965    return 0;
 966}
 967
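/**
 * Restore the network device state (MAC, MAC filter, queue pairs, offloads
 * and rx mode) through CVQ when SVQ is enabled, so that the device matches
 * the guest visible state, e.g. at the destination of a live migration.
 */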
 968static int vhost_vdpa_net_load(NetClientState *nc)
 969{
 970    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
 971    struct vhost_vdpa *v = &s->vhost_vdpa;
 972    const VirtIONet *n;
 973    int r;
 974
 975    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
 976
 977    if (!v->shadow_vqs_enabled) {
 978        return 0;
 979    }
 980
 981    n = VIRTIO_NET(v->dev->vdev);
 982    r = vhost_vdpa_net_load_mac(s, n);
 983    if (unlikely(r < 0)) {
 984        return r;
 985    }
 986    r = vhost_vdpa_net_load_mq(s, n);
 987    if (unlikely(r)) {
 988        return r;
 989    }
 990    r = vhost_vdpa_net_load_offloads(s, n);
 991    if (unlikely(r)) {
 992        return r;
 993    }
 994    r = vhost_vdpa_net_load_rx(s, n);
 995    if (unlikely(r)) {
 996        return r;
 997    }
 998
 999    return 0;
1000}
1001
1002static NetClientInfo net_vhost_vdpa_cvq_info = {
1003    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
1004    .size = sizeof(VhostVDPAState),
1005    .receive = vhost_vdpa_receive,
1006    .start = vhost_vdpa_net_cvq_start,
1007    .load = vhost_vdpa_net_load,
1008    .stop = vhost_vdpa_net_cvq_stop,
1009    .cleanup = vhost_vdpa_cleanup,
1010    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
1011    .has_ufo = vhost_vdpa_has_ufo,
1012    .check_peer_type = vhost_vdpa_check_peer_type,
1013};
1014
1015/*
1016 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
1017 * vdpa device.
1018 *
1019 * Considering that QEMU cannot send the entire filter table to the
1020 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
1021 * command to enable promiscuous mode to receive all packets,
1022 * according to VirtIO standard, "Since there are no guarantees,
1023 * it can use a hash filter or silently switch to allmulti or
1024 * promiscuous mode if it is given too many addresses.".
1025 *
1026 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
1027 * marks `n->mac_table.x_overflow` accordingly, it should have
1028 * the same effect on the device model to receive
1029 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
1030 * The same applies to multicast MAC addresses.
1031 *
1032 * Therefore, QEMU can provide the device model with a fake
1033 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
1034 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1035 * MAC addresses. This ensures that the device model marks
1036 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1037 * allowing all packets to be received, which aligns with the
1038 * state of the vdpa device.
1039 */
1040static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
1041                                                       VirtQueueElement *elem,
1042                                                       struct iovec *out)
1043{
1044    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
1045    struct virtio_net_ctrl_hdr *hdr_ptr;
1046    uint32_t cursor;
1047    ssize_t r;
1048
1049    /* parse the non-multicast MAC address entries from CVQ command */
1050    cursor = sizeof(*hdr_ptr);
1051    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1052                   &mac_data, sizeof(mac_data));
1053    if (unlikely(r != sizeof(mac_data))) {
1054        /*
1055         * If the CVQ command is invalid, we should simulate the vdpa device
1056         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1057         */
1058        *s->status = VIRTIO_NET_ERR;
1059        return sizeof(*s->status);
1060    }
1061    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1062
1063    /* parse the multicast MAC address entries from CVQ command */
1064    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
1065                   &mac_data, sizeof(mac_data));
1066    if (r != sizeof(mac_data)) {
1067        /*
1068         * If the CVQ command is invalid, we should simulate the vdpa device
1069         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1070         */
1071        *s->status = VIRTIO_NET_ERR;
1072        return sizeof(*s->status);
1073    }
1074    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;
1075
1076    /* validate the CVQ command */
1077    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
1078        /*
1079         * If the CVQ command is invalid, we should simulate the vdpa device
1080         * to reject the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1081         */
1082        *s->status = VIRTIO_NET_ERR;
1083        return sizeof(*s->status);
1084    }
1085
1086    /*
1087     * According to VirtIO standard, "Since there are no guarantees,
1088     * it can use a hash filter or silently switch to allmulti or
1089     * promiscuous mode if it is given too many addresses.".
1090     *
1091     * Therefore, considering that QEMU is unable to send the entire
1092     * filter table to the vdpa device, it should send the
1093     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
1094     */
1095    r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
1096    if (unlikely(r < 0)) {
1097        return r;
1098    }
1099    if (*s->status != VIRTIO_NET_OK) {
1100        return sizeof(*s->status);
1101    }
1102
1103    /*
1104     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
1105     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
1106     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
1107     * multicast MAC addresses.
1108     *
1109     * By doing so, the device model can mark `n->mac_table.uni_overflow`
1110     * and `n->mac_table.multi_overflow`, enabling all packets to be
1111     * received, which aligns with the state of the vdpa device.
1112     */
1113    cursor = 0;
1114    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
1115             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
1116             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
1117                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
1118                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;
1119
1120    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
1121    out->iov_len = fake_cvq_size;
1122
1123    /* pack the header for fake CVQ command */
1124    hdr_ptr = out->iov_base + cursor;
1125    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
1126    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
1127    cursor += sizeof(*hdr_ptr);
1128
1129    /*
1130     * Pack the non-multicast MAC addresses part for fake CVQ command.
1131     *
1132     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
1135     */
1136    mac_ptr = out->iov_base + cursor;
1137    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
1138    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;
1139
1140    /*
1141     * Pack the multicast MAC addresses part for fake CVQ command.
1142     *
1143     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
1146     */
1147    mac_ptr = out->iov_base + cursor;
1148    mac_ptr->entries = cpu_to_le32(fake_mul_entries);
1149
1150    /*
1151     * Simulating QEMU poll a vdpa device used buffer
1152     * for VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
1153     */
1154    return sizeof(*s->status);
1155}
1156
1157/**
1158 * Validate and copy control virtqueue commands.
1159 *
1160 * Following QEMU guidelines, we offer a copy of the buffers to the device to
1161 * prevent TOCTOU bugs.
1162 */
1163static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1164                                            VirtQueueElement *elem,
1165                                            void *opaque)
1166{
1167    VhostVDPAState *s = opaque;
1168    size_t in_len;
1169    const struct virtio_net_ctrl_hdr *ctrl;
1170    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1171    /* Out buffer sent to both the vdpa device and the device model */
1172    struct iovec out = {
1173        .iov_base = s->cvq_cmd_out_buffer,
1174    };
1175    /* in buffer used for device model */
1176    const struct iovec in = {
1177        .iov_base = &status,
1178        .iov_len = sizeof(status),
1179    };
1180    ssize_t dev_written = -EINVAL;
1181
1182    out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1183                             s->cvq_cmd_out_buffer,
1184                             vhost_vdpa_net_cvq_cmd_page_len());
1185
1186    ctrl = s->cvq_cmd_out_buffer;
1187    if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1188        /*
1189         * Guest announce capability is emulated by qemu, so don't forward to
1190         * the device.
1191         */
1192        dev_written = sizeof(status);
1193        *s->status = VIRTIO_NET_OK;
1194    } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1195                        ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1196                        iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1197        /*
1198         * Due to the size limitation of the out buffer sent to the vdpa device,
1199         * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1200         * MAC addresses set by the driver for the filter table can cause
1201         * truncation of the CVQ command in QEMU. As a result, the vdpa device
1202         * rejects the flawed CVQ command.
1203         *
1204         * Therefore, QEMU must handle this situation instead of sending
         * the CVQ command directly.
1206         */
1207        dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1208                                                                  &out);
1209        if (unlikely(dev_written < 0)) {
1210            goto out;
1211        }
1212    } else {
1213        dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
1214        if (unlikely(dev_written < 0)) {
1215            goto out;
1216        }
1217    }
1218
1219    if (unlikely(dev_written < sizeof(status))) {
1220        error_report("Insufficient written data (%zu)", dev_written);
1221        goto out;
1222    }
1223
1224    if (*s->status != VIRTIO_NET_OK) {
1225        goto out;
1226    }
1227
1228    status = VIRTIO_NET_ERR;
1229    virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
1230    if (status != VIRTIO_NET_OK) {
1231        error_report("Bad CVQ processing in model");
1232    }
1233
1234out:
1235    in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1236                          sizeof(status));
1237    if (unlikely(in_len < sizeof(status))) {
1238        error_report("Bad device CVQ written length");
1239    }
1240    vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1241    /*
1242     * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1243     * the function successfully forwards the CVQ command, indicated
1244     * by a non-negative value of `dev_written`. Otherwise, it still
1245     * belongs to SVQ.
     * This function should only free the `elem` when it owns it.
1247     */
1248    if (dev_written >= 0) {
1249        g_free(elem);
1250    }
1251    return dev_written < 0 ? dev_written : 0;
1252}
1253
1254static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1255    .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1256};
1257
1258/**
1259 * Probe if CVQ is isolated
1260 *
1261 * @device_fd         The vdpa device fd
1262 * @features          Features offered by the device.
1263 * @cvq_index         The control vq pair index
1264 *
1265 * Returns <0 in case of failure, 0 if false and 1 if true.
1266 */
1267static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1268                                          int cvq_index, Error **errp)
1269{
1270    uint64_t backend_features;
1271    int64_t cvq_group;
1272    uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1273                     VIRTIO_CONFIG_S_DRIVER |
1274                     VIRTIO_CONFIG_S_FEATURES_OK;
1275    int r;
1276
1277    ERRP_GUARD();
1278
1279    r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1280    if (unlikely(r < 0)) {
1281        error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1282        return r;
1283    }
1284
1285    if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1286        return 0;
1287    }
1288
1289    r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1290    if (unlikely(r)) {
1291        error_setg_errno(errp, errno, "Cannot set features");
1292    }
1293
1294    r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1295    if (unlikely(r)) {
        error_setg_errno(errp, errno, "Cannot set device status");
1297        goto out;
1298    }
1299
1300    cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1301    if (unlikely(cvq_group < 0)) {
1302        if (cvq_group != -ENOTSUP) {
1303            r = cvq_group;
1304            goto out;
1305        }
1306
1307        /*
         * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
         * supports ASID even if the parent driver does not.  The CVQ cannot be
1310         * isolated in this case.
1311         */
1312        error_free(*errp);
1313        *errp = NULL;
1314        r = 0;
1315        goto out;
1316    }
1317
1318    for (int i = 0; i < cvq_index; ++i) {
1319        int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1320        if (unlikely(group < 0)) {
1321            r = group;
1322            goto out;
1323        }
1324
1325        if (group == (int64_t)cvq_group) {
1326            r = 0;
1327            goto out;
1328        }
1329    }
1330
1331    r = 1;
1332
1333out:
1334    status = 0;
1335    ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1336    return r;
1337}
1338
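/**
 * Create a vhost-vdpa net client for one data queue pair or, when
 * @is_datapath is false, for the control virtqueue. The CVQ client also
 * probes whether CVQ can be isolated in its own address space and allocates
 * the shadow command buffers.
 */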
1339static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1340                                       const char *device,
1341                                       const char *name,
1342                                       int vdpa_device_fd,
1343                                       int queue_pair_index,
1344                                       int nvqs,
1345                                       bool is_datapath,
1346                                       bool svq,
1347                                       struct vhost_vdpa_iova_range iova_range,
1348                                       uint64_t features,
1349                                       Error **errp)
1350{
1351    NetClientState *nc = NULL;
1352    VhostVDPAState *s;
1353    int ret = 0;
1354    assert(name);
1355    int cvq_isolated;
1356
1357    if (is_datapath) {
1358        nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1359                                 name);
1360    } else {
1361        cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1362                                                      queue_pair_index * 2,
1363                                                      errp);
1364        if (unlikely(cvq_isolated < 0)) {
1365            return NULL;
1366        }
1367
1368        nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1369                                         device, name);
1370    }
1371    qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1372    s = DO_UPCAST(VhostVDPAState, nc, nc);
1373
1374    s->vhost_vdpa.device_fd = vdpa_device_fd;
1375    s->vhost_vdpa.index = queue_pair_index;
1376    s->always_svq = svq;
1377    s->migration_state.notify = vdpa_net_migration_state_notifier;
1378    s->vhost_vdpa.shadow_vqs_enabled = svq;
1379    s->vhost_vdpa.iova_range = iova_range;
1380    s->vhost_vdpa.shadow_data = svq;
1381    if (queue_pair_index == 0) {
1382        vhost_vdpa_net_valid_svq_features(features,
1383                                          &s->vhost_vdpa.migration_blocker);
1384    } else if (!is_datapath) {
1385        s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1386                                     PROT_READ | PROT_WRITE,
1387                                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1388        s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1389                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1390                         -1, 0);
1391
1392        s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1393        s->vhost_vdpa.shadow_vq_ops_opaque = s;
1394        s->cvq_isolated = cvq_isolated;
1395
1396        /*
1397         * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
1398         * there is no way to set the device state (MAC, MQ, etc) before
1399         * starting the datapath.
1400         *
1401         * Migration blocker ownership now belongs to s->vhost_vdpa.
1402         */
1403        if (!svq) {
1404            error_setg(&s->vhost_vdpa.migration_blocker,
1405                       "net vdpa cannot migrate with CVQ feature");
1406        }
1407    }
1408    ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1409    if (ret) {
1410        qemu_del_net_client(nc);
1411        return NULL;
1412    }
1413    return nc;
1414}
1415
1416static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1417{
1418    int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1419    if (unlikely(ret < 0)) {
1420        error_setg_errno(errp, errno,
                         "Failed to query features from vhost-vDPA device");
1422    }
1423    return ret;
1424}
1425
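/**
 * Return the maximum number of queue pairs supported by the device, read
 * from the config space when VIRTIO_NET_F_MQ is offered or 1 otherwise, and
 * report in @has_cvq whether the device has a control virtqueue.
 */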
1426static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1427                                          int *has_cvq, Error **errp)
1428{
1429    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1430    g_autofree struct vhost_vdpa_config *config = NULL;
1431    __virtio16 *max_queue_pairs;
1432    int ret;
1433
1434    if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
1435        *has_cvq = 1;
1436    } else {
1437        *has_cvq = 0;
1438    }
1439
1440    if (features & (1 << VIRTIO_NET_F_MQ)) {
1441        config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1442        config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1443        config->len = sizeof(*max_queue_pairs);
1444
1445        ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1446        if (ret) {
            error_setg(errp, "Failed to get config from vhost-vDPA device");
1448            return -ret;
1449        }
1450
1451        max_queue_pairs = (__virtio16 *)&config->buf;
1452
1453        return lduw_le_p(max_queue_pairs);
1454    }
1455
1456    return 1;
1457}
1458
1459int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1460                        NetClientState *peer, Error **errp)
1461{
1462    const NetdevVhostVDPAOptions *opts;
1463    uint64_t features;
1464    int vdpa_device_fd;
1465    g_autofree NetClientState **ncs = NULL;
1466    struct vhost_vdpa_iova_range iova_range;
1467    NetClientState *nc;
1468    int queue_pairs, r, i = 0, has_cvq = 0;
1469
1470    assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1471    opts = &netdev->u.vhost_vdpa;
1472    if (!opts->vhostdev && !opts->vhostfd) {
1473        error_setg(errp,
1474                   "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1475        return -1;
1476    }
1477
1478    if (opts->vhostdev && opts->vhostfd) {
1479        error_setg(errp,
1480                   "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1481        return -1;
1482    }
1483
1484    if (opts->vhostdev) {
1485        vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1486        if (vdpa_device_fd == -1) {
1487            return -errno;
1488        }
1489    } else {
1490        /* has_vhostfd */
1491        vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1492        if (vdpa_device_fd == -1) {
1493            error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1494            return -1;
1495        }
1496    }
1497
1498    r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1499    if (unlikely(r < 0)) {
1500        goto err;
1501    }
1502
1503    queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1504                                                 &has_cvq, errp);
1505    if (queue_pairs < 0) {
1506        qemu_close(vdpa_device_fd);
1507        return queue_pairs;
1508    }
1509
1510    r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1511    if (unlikely(r < 0)) {
1512        error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1513                   strerror(-r));
1514        goto err;
1515    }
1516
1517    if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1518        goto err;
1519    }
1520
1521    ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1522
1523    for (i = 0; i < queue_pairs; i++) {
1524        ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1525                                     vdpa_device_fd, i, 2, true, opts->x_svq,
1526                                     iova_range, features, errp);
        if (!ncs[i]) {
            goto err;
        }
1529    }
1530
1531    if (has_cvq) {
1532        nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1533                                 vdpa_device_fd, i, 1, false,
1534                                 opts->x_svq, iova_range, features, errp);
        if (!nc) {
            goto err;
        }
1537    }
1538
1539    return 0;
1540
1541err:
1542    if (i) {
1543        for (i--; i >= 0; i--) {
1544            qemu_del_net_client(ncs[i]);
1545        }
1546    }
1547
1548    qemu_close(vdpa_device_fd);
1549
1550    return -1;
1551}
1552