linux/drivers/vdpa/vdpa_sim/vdpa_sim.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA networking device simulator.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/iommu.h>
#include <linux/dma-mapping.h>
#include <linux/sysfs.h>
#include <linux/file.h>
#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
#include <uapi/linux/virtio_net.h>

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

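/* Per-virtqueue bookkeeping: the vringh instance that parses the ring,
 * the ring addresses programmed by the guest driver, and the callback
 * used to signal the driver when buffers are used.
 */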
struct vdpasim_virtqueue {
        struct vringh vring;
        struct vringh_kiov iov;
        unsigned short head;
        bool ready;
        u64 desc_addr;
        u64 device_addr;
        u64 driver_addr;
        u32 num;
        void *private;
        irqreturn_t (*cb)(void *data);
};

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
#define VDPASIM_VQ_NUM 0x2
#define VDPASIM_NAME "vdpasim-netdev"

static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
                              (1ULL << VIRTIO_F_VERSION_1)  |
                              (1ULL << VIRTIO_F_ACCESS_PLATFORM);

/* State of each vdpasim device */
struct vdpasim {
        struct vdpa_device vdpa;
        struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
        struct work_struct work;
        /* spinlock to synchronize virtqueue state */
        spinlock_t lock;
        struct virtio_net_config config;
        struct vhost_iotlb *iommu;
        void *buffer;
        u32 status;
        u32 generation;
        u64 features;
        /* spinlock to synchronize iommu table */
        spinlock_t iommu_lock;
};

/* TODO: cross-endian support */
static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
{
        return virtio_legacy_is_little_endian() ||
                (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
}

static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
{
        return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
}

static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
{
        return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
}

static struct vdpasim *vdpasim_dev;

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
        struct vdpa_device *vdpa = dev_to_vdpa(dev);

        return vdpa_to_sim(vdpa);
}

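/* Bind the vringh instance to the ring addresses the guest driver
 * programmed; subsequent ring accesses are translated through the IOTLB.
 */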
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vringh_init_iotlb(&vq->vring, vdpasim_features,
                          VDPASIM_QUEUE_MAX, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
                          (struct vring_used *)
                          (uintptr_t)vq->device_addr);
}

static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
                          false, NULL, NULL, NULL);
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
        int i;

        for (i = 0; i < VDPASIM_VQ_NUM; i++)
                vdpasim_vq_reset(&vdpasim->vqs[i]);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}

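/* Datapath: loop packets from the TX queue (vqs[1]) back into the RX
 * queue (vqs[0]) through the page-sized bounce buffer, then fire the
 * per-queue callbacks. After a small batch the work reschedules itself
 * so a busy guest cannot monopolize the workqueue.
 */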
static void vdpasim_work(struct work_struct *work)
{
        struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
        struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
        struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
        ssize_t read, write;
        size_t total_write;
        int pkts = 0;
        int err;

        spin_lock(&vdpasim->lock);

        if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
                goto out;

        if (!txq->ready || !rxq->ready)
                goto out;

        while (true) {
                total_write = 0;
                err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
                                           &txq->head, GFP_ATOMIC);
                if (err <= 0)
                        break;

                err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
                                           &rxq->head, GFP_ATOMIC);
                if (err <= 0) {
                        vringh_complete_iotlb(&txq->vring, txq->head, 0);
                        break;
                }

                while (true) {
                        read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
                                                     vdpasim->buffer,
                                                     PAGE_SIZE);
                        if (read <= 0)
                                break;

                        write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
                                                      vdpasim->buffer, read);
                        if (write <= 0)
                                break;

                        total_write += write;
                }

                /* Make sure data is written before advancing index */
                smp_wmb();

                vringh_complete_iotlb(&txq->vring, txq->head, 0);
                vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);

                /* Make sure used is visible before raising the interrupt. */
                smp_wmb();

                local_bh_disable();
                if (txq->cb)
                        txq->cb(txq->private);
                if (rxq->cb)
                        rxq->cb(rxq->private);
                local_bh_enable();

                if (++pkts > 4) {
                        schedule_work(&vdpasim->work);
                        goto out;
                }
        }

out:
        spin_unlock(&vdpasim->lock);
}

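/* Translate a DMA direction into the matching IOTLB access permission;
 * unsupported directions (e.g. DMA_NONE) yield -EFAULT.
 */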
static int dir_to_perm(enum dma_data_direction dir)
{
        int perm = -EFAULT;

        switch (dir) {
        case DMA_FROM_DEVICE:
                perm = VHOST_MAP_WO;
                break;
        case DMA_TO_DEVICE:
                perm = VHOST_MAP_RO;
                break;
        case DMA_BIDIRECTIONAL:
                perm = VHOST_MAP_RW;
                break;
        default:
                break;
        }

        return perm;
}

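/* dma_map_ops for the simulator: there is no real device or IOMMU, so
 * mappings are recorded 1:1 (IOVA == physical address) in the vhost
 * IOTLB that the vringh accessors consult.
 */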
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
        u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
        int ret, perm = dir_to_perm(dir);

        if (perm < 0)
                return DMA_MAPPING_ERROR;

        /* For simplicity, use identity mapping to avoid e.g. an iova
         * allocator.
         */
        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
                                    pa, perm);
        spin_unlock(&vdpasim->iommu_lock);
        if (ret)
                return DMA_MAPPING_ERROR;

        return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr, gfp_t flag,
                                    unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
        void *addr = kmalloc(size, flag);
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
        } else {
                u64 pa = virt_to_phys(addr);

                ret = vhost_iotlb_add_range(iommu, (u64)pa,
                                            (u64)pa + size - 1,
                                            pa, VHOST_MAP_RW);
                if (ret) {
                        *dma_addr = DMA_MAPPING_ERROR;
                        kfree(addr);
                        addr = NULL;
                } else
                        *dma_addr = (dma_addr_t)pa;
        }
        spin_unlock(&vdpasim->iommu_lock);

        return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_addr,
                                  unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        kfree(phys_to_virt((uintptr_t)dma_addr));
}

static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
};

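/* Two vdpa_config_ops variants are registered depending on batch_mapping:
 * the batched variant exposes set_map() so the whole IOTLB is replaced in
 * one call, while the non-batched variant exposes incremental
 * dma_map()/dma_unmap().
 */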
static const struct vdpa_config_ops vdpasim_net_config_ops;
static const struct vdpa_config_ops vdpasim_net_batch_config_ops;

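/* Allocate and register one simulator instance: set up the loopback work,
 * the DMA ops, the IOTLB, the bounce buffer and a random MAC address.
 */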
static struct vdpasim *vdpasim_create(void)
{
        const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int ret = -ENOMEM;

        if (batch_mapping)
                ops = &vdpasim_net_batch_config_ops;
        else
                ops = &vdpasim_net_config_ops;

        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
        if (!vdpasim)
                goto err_alloc;

        INIT_WORK(&vdpasim->work, vdpasim_work);
        spin_lock_init(&vdpasim->lock);
        spin_lock_init(&vdpasim->iommu_lock);

        dev = &vdpasim->vdpa.dev;
        dev->coherent_dma_mask = DMA_BIT_MASK(64);
        set_dma_ops(dev, &vdpasim_dma_ops);

        vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
        if (!vdpasim->iommu)
                goto err_iommu;

        vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;

        eth_random_addr(vdpasim->config.mac);

        vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
        vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);

        vdpasim->vdpa.dma_dev = dev;
        ret = vdpa_register_device(&vdpasim->vdpa);
        if (ret)
                goto err_iommu;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}

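/* Virtqueue-related vdpa_config_ops: these record what the driver
 * programs (ring addresses, size, callback, state) and kick the
 * loopback work when a queue is notified.
 */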
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (vq->ready)
                schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        spin_lock(&vdpasim->lock);
        vq->ready = ready;
        if (vq->ready)
                vdpasim_queue_ready(vdpasim, idx);
        spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        spin_lock(&vdpasim->lock);
        vrh->last_avail_idx = state->avail_index;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->avail_index = vrh->last_avail_idx;
        return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
        return vdpasim_features;
}

static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct virtio_net_config *config = &vdpasim->config;

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim_features;

        /* We generally only know whether guest is using the legacy interface
         * here, so generally that's the earliest we can set config fields.
         * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
         * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
         */

        config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
        config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
        return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        return VDPASIM_DEVICE_ID;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        spin_lock(&vdpasim->lock);
        status = vdpasim->status;
        spin_unlock(&vdpasim->lock);

        return status;
}

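/* Per the virtio spec, writing 0 to the status register resets the
 * device, so a zero status tears down all virtqueue and IOTLB state.
 */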
static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = status;
        if (status == 0)
                vdpasim_reset(vdpasim);
        spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                             void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* Reject reads that run past the end of the config space */
        if (offset + len <= sizeof(struct virtio_net_config))
                memcpy(buf, (u8 *)&vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                             const void *buf, unsigned int len)
{
        /* No writable config supported by vdpasim */
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

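/* Batched mapping path: discard the current IOTLB contents and rebuild
 * them in one pass from the translations supplied by the caller.
 */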
static int vdpasim_set_map(struct vdpa_device *vdpa,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);
        return 0;

err:
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
                           u64 pa, u32 perm)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
                                    perm);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        cancel_work_sync(&vdpasim->work);
        kfree(vdpasim->buffer);
        if (vdpasim->iommu)
                vhost_iotlb_free(vdpasim->iommu);
}

static const struct vdpa_config_ops vdpasim_net_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .set_map                = vdpasim_set_map,
        .free                   = vdpasim_free,
};

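/* Module init instantiates a single simulator device. A minimal usage
 * sketch (assuming the default module names built from this tree; exact
 * names may differ by configuration):
 *
 *   modprobe vdpa_sim      # creates the simulated vDPA device
 *   modprobe virtio_vdpa   # or vhost_vdpa, to bind a bus driver to it
 *
 * With virtio_vdpa bound, the device appears as an ordinary virtio-net
 * interface whose TX traffic is looped back to RX.
 */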
static int __init vdpasim_dev_init(void)
{
        vdpasim_dev = vdpasim_create();

        if (!IS_ERR(vdpasim_dev))
                return 0;

        return PTR_ERR(vdpasim_dev);
}

static void __exit vdpasim_dev_exit(void)
{
        struct vdpa_device *vdpa = &vdpasim_dev->vdpa;

        vdpa_unregister_device(vdpa);
}

module_init(vdpasim_dev_init)
module_exit(vdpasim_dev_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);