linux/drivers/vdpa/vdpa_sim/vdpa_sim.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <jasowang@redhat.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <jasowang@redhat.com>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping. 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
                 "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");

#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
        struct vdpa_device *vdpa = dev_to_vdpa(dev);

        return vdpa_to_sim(vdpa);
}

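/*
 * vringh notify callback: relay a used-ring notification from the
 * simulated virtqueue to the callback the driver registered with
 * .set_vq_cb, if any.
 */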
static void vdpasim_vq_notify(struct vringh *vring)
{
        struct vdpasim_virtqueue *vq =
                container_of(vring, struct vdpasim_virtqueue, vring);

        if (!vq->cb)
                return;

        vq->cb(vq->private);
}

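/*
 * (Re)initialize the IOTLB-backed vringh for a virtqueue once the driver
 * marks it ready, using the descriptor/driver/device areas programmed
 * earlier via .set_vq_address.
 */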
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
                          (struct vring_used *)
                          (uintptr_t)vq->device_addr);

        vq->vring.notify = vdpasim_vq_notify;
}

static void vdpasim_vq_reset(struct vdpasim *vdpasim,
                             struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

        vq->vring.notify = NULL;
}

static void vdpasim_reset(struct vdpasim *vdpasim)
{
        int i;

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}

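/*
 * Translate a DMA direction into vhost IOTLB permissions. Note the
 * inversion: DMA_FROM_DEVICE means the device writes the buffer
 * (VHOST_MAP_WO), DMA_TO_DEVICE means the device only reads it
 * (VHOST_MAP_RO).
 */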
static int dir_to_perm(enum dma_data_direction dir)
{
        int perm = -EFAULT;

        switch (dir) {
        case DMA_FROM_DEVICE:
                perm = VHOST_MAP_WO;
                break;
        case DMA_TO_DEVICE:
                perm = VHOST_MAP_RO;
                break;
        case DMA_BIDIRECTIONAL:
                perm = VHOST_MAP_RW;
                break;
        default:
                break;
        }

        return perm;
}

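/*
 * The simulator has no real IOMMU: DMA is emulated by recording every
 * mapping in a vhost IOTLB, which the vringh IOTLB accessors consult
 * when the device side touches guest memory.
 */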
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
        u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
        int ret, perm = dir_to_perm(dir);

        if (perm < 0)
                return DMA_MAPPING_ERROR;

        /* For simplicity, use an identity mapping to avoid e.g. an
         * IOVA allocator.
         */
        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, perm);
        spin_unlock(&vdpasim->iommu_lock);
        if (ret)
                return DMA_MAPPING_ERROR;

        return (dma_addr_t)(pa);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);
}

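/*
 * Coherent allocations are plain kmalloc() buffers: the physical address
 * doubles as the DMA address and the range is registered read/write in
 * the IOTLB.
 */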
static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr, gfp_t flag,
                                    unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;
        void *addr = kmalloc(size, flag);
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
        } else {
                u64 pa = virt_to_phys(addr);

                ret = vhost_iotlb_add_range(iommu, (u64)pa,
                                            (u64)pa + size - 1,
                                            pa, VHOST_MAP_RW);
                if (ret) {
                        *dma_addr = DMA_MAPPING_ERROR;
                        kfree(addr);
                        addr = NULL;
                } else
                        *dma_addr = (dma_addr_t)pa;
        }
        spin_unlock(&vdpasim->iommu_lock);

        return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_addr,
                                  unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        struct vhost_iotlb *iommu = vdpasim->iommu;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(iommu, (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        kfree(phys_to_virt((uintptr_t)dma_addr));
}

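/*
 * Installing these ops on the simulator's device makes the generic DMA
 * API (as used by vringh and upper layers) go through the emulated
 * IOTLB instead of a hardware IOMMU.
 */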
static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
};

static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

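/**
 * vdpasim_create - create a simulated vDPA device
 * @dev_attr: device attributes supplied by the device simulator
 *            (number of virtqueues, config size, work function, ...)
 *
 * Allocates the vdpa device together with the config space, virtqueue
 * array, IOTLB and data buffer it needs, and installs the DMA ops above.
 *
 * Returns the new vdpasim on success or an ERR_PTR() on failure.
 */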
struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
        const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int i, ret = -ENOMEM;

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;

        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->nvqs);
        if (!vdpasim)
                goto err_alloc;

        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
        spin_lock_init(&vdpasim->lock);
        spin_lock_init(&vdpasim->iommu_lock);

        dev = &vdpasim->vdpa.dev;
        dev->dma_mask = &dev->coherent_dma_mask;
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                goto err_iommu;
        set_dma_ops(dev, &vdpasim_dma_ops);

        vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
        if (!vdpasim->config)
                goto err_iommu;

        vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                               GFP_KERNEL);
        if (!vdpasim->vqs)
                goto err_iommu;

        vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
        if (!vdpasim->iommu)
                goto err_iommu;

        vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;

        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);

        vdpasim->vdpa.dma_dev = dev;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
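
/*
 * Illustrative sketch only (not part of this file) of how a device
 * simulator built on this core might call vdpasim_create(); the field
 * values and the work function name are hypothetical:
 *
 *	static struct vdpasim_dev_attr dev_attr = {
 *		.id = VIRTIO_ID_NET,
 *		.supported_features = 1ULL << VIRTIO_F_ACCESS_PLATFORM,
 *		.nvqs = 2,
 *		.config_size = sizeof(struct virtio_net_config),
 *		.buffer_size = PAGE_SIZE,
 *		.work_fn = my_sim_work,		// hypothetical work function
 *	};
 *	struct vdpasim *sim = vdpasim_create(&dev_attr);
 *
 *	if (IS_ERR(sim))
 *		return PTR_ERR(sim);
 */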

static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (vq->ready)
                schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        spin_lock(&vdpasim->lock);
        vq->ready = ready;
        if (vq->ready)
                vdpasim_queue_ready(vdpasim, idx);
        spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        spin_lock(&vdpasim->lock);
        vrh->last_avail_idx = state->avail_index;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->avail_index = vrh->last_avail_idx;
        return 0;
}

static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.supported_features;
}

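/*
 * VIRTIO_F_ACCESS_PLATFORM is mandatory here: all device accesses must
 * go through the emulated platform IOMMU (the vhost IOTLB), so a driver
 * that does not negotiate it cannot work with this simulator.
 */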
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim->dev_attr.supported_features;

        return 0;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        spin_lock(&vdpasim->lock);
        status = vdpasim->status;
        spin_unlock(&vdpasim->lock);

        return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = status;
        if (status == 0)
                vdpasim_reset(vdpasim);
        spin_unlock(&vdpasim->lock);
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                               const void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        memcpy(vdpasim->config + offset, buf, len);

        if (vdpasim->dev_attr.set_config)
                vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
        struct vdpa_iova_range range = {
                .first = 0ULL,
                .last = ULLONG_MAX,
        };

        return range;
}

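/*
 * Batched mapping: replace the whole device IOTLB in one call by copying
 * every range from the caller-supplied iotlb. On failure the IOTLB is
 * left empty rather than partially populated.
 */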
static int vdpasim_set_map(struct vdpa_device *vdpa,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_reset(vdpasim->iommu);

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);
        return 0;

err:
        vhost_iotlb_reset(vdpasim->iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}

static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
                           u64 pa, u32 perm)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa,
                                    perm);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        cancel_work_sync(&vdpasim->work);
        kvfree(vdpasim->buffer);
        if (vdpasim->iommu)
                vhost_iotlb_free(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
}

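/*
 * The two config_ops tables below differ only in the mapping interface:
 * vdpasim_config_ops exposes per-range .dma_map/.dma_unmap, while
 * vdpasim_batch_config_ops (selected by the batch_mapping module
 * parameter) exposes .set_map to replace the whole IOTLB at once.
 */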
static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_features           = vdpasim_get_features,
        .set_features           = vdpasim_set_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .set_map                = vdpasim_set_map,
        .free                   = vdpasim_free,
};

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);