/* drivers/virtio/virtio_vdpa.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * VIRTIO based driver for vDPA device
   4 *
   5 * Copyright (c) 2020, Red Hat. All rights reserved.
   6 *     Author: Jason Wang <jasowang@redhat.com>
   7 *
   8 */
   9
  10#include <linux/init.h>
  11#include <linux/module.h>
  12#include <linux/device.h>
  13#include <linux/kernel.h>
  14#include <linux/slab.h>
  15#include <linux/uuid.h>
  16#include <linux/virtio.h>
  17#include <linux/vdpa.h>
  18#include <linux/virtio_config.h>
  19#include <linux/virtio_ring.h>
  20
  21#define MOD_VERSION  "0.1"
  22#define MOD_AUTHOR   "Jason Wang <jasowang@redhat.com>"
  23#define MOD_DESC     "vDPA bus driver for virtio devices"
  24#define MOD_LICENSE  "GPL v2"
  25
/*
 * State for one virtio device instantiated on top of a vDPA device.
 * The virtio core operates on the embedded @vdev; @vdpa is the backing
 * vDPA device whose config ops implement the transport.
 */
struct virtio_vdpa_device {
        struct virtio_device vdev;
        struct vdpa_device *vdpa;
        /* NOTE(review): not referenced anywhere in this file — confirm it is needed */
        u64 features;

        /* The lock to protect virtqueue list */
        spinlock_t lock;
        /* List of virtio_vdpa_vq_info */
        struct list_head virtqueues;
};
  36
/*
 * Per-virtqueue bookkeeping; linked on virtio_vdpa_device->virtqueues
 * and reachable both via vq->priv and as the vq callback's private data.
 */
struct virtio_vdpa_vq_info {
        /* the actual virtqueue */
        struct virtqueue *vq;

        /* the list node for the virtqueues list */
        struct list_head node;
};
  44
  45static inline struct virtio_vdpa_device *
  46to_virtio_vdpa_device(struct virtio_device *dev)
  47{
  48        return container_of(dev, struct virtio_vdpa_device, vdev);
  49}
  50
  51static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
  52{
  53        return to_virtio_vdpa_device(vdev)->vdpa;
  54}
  55
  56static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
  57                            void *buf, unsigned len)
  58{
  59        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
  60
  61        vdpa_get_config(vdpa, offset, buf, len);
  62}
  63
  64static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
  65                            const void *buf, unsigned len)
  66{
  67        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
  68        const struct vdpa_config_ops *ops = vdpa->config;
  69
  70        ops->set_config(vdpa, offset, buf, len);
  71}
  72
  73static u32 virtio_vdpa_generation(struct virtio_device *vdev)
  74{
  75        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
  76        const struct vdpa_config_ops *ops = vdpa->config;
  77
  78        if (ops->get_generation)
  79                return ops->get_generation(vdpa);
  80
  81        return 0;
  82}
  83
  84static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
  85{
  86        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
  87        const struct vdpa_config_ops *ops = vdpa->config;
  88
  89        return ops->get_status(vdpa);
  90}
  91
  92static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
  93{
  94        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
  95        const struct vdpa_config_ops *ops = vdpa->config;
  96
  97        return ops->set_status(vdpa, status);
  98}
  99
 100static void virtio_vdpa_reset(struct virtio_device *vdev)
 101{
 102        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 103
 104        vdpa_reset(vdpa);
 105}
 106
 107static bool virtio_vdpa_notify(struct virtqueue *vq)
 108{
 109        struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
 110        const struct vdpa_config_ops *ops = vdpa->config;
 111
 112        ops->kick_vq(vdpa, vq->index);
 113
 114        return true;
 115}
 116
 117static irqreturn_t virtio_vdpa_config_cb(void *private)
 118{
 119        struct virtio_vdpa_device *vd_dev = private;
 120
 121        virtio_config_changed(&vd_dev->vdev);
 122
 123        return IRQ_HANDLED;
 124}
 125
 126static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
 127{
 128        struct virtio_vdpa_vq_info *info = private;
 129
 130        return vring_interrupt(0, info->vq);
 131}
 132
/*
 * Allocate a vring for queue @index, wire it up to the vDPA backend and
 * mark the queue ready.
 *
 * Returns the new virtqueue, NULL when @name is NULL (caller does not
 * want this queue), or an ERR_PTR() on failure.
 */
static struct virtqueue *
virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
                     void (*callback)(struct virtqueue *vq),
                     const char *name, bool ctx)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_vq_info *info;
        struct vdpa_callback cb;
        struct virtqueue *vq;
        u64 desc_addr, driver_addr, device_addr;
        unsigned long flags;
        u32 align, num;
        int err;

        if (!name)
                return NULL;

        /* Queue shouldn't already be set up. */
        if (ops->get_vq_ready(vdpa, index))
                return ERR_PTR(-ENOENT);

        /* Allocate and fill out our active queue description */
        info = kmalloc(sizeof(*info), GFP_KERNEL);
        if (!info)
                return ERR_PTR(-ENOMEM);

        /* A zero max size means the backend offers no usable queue. */
        num = ops->get_vq_num_max(vdpa);
        if (num == 0) {
                err = -ENOENT;
                goto error_new_virtqueue;
        }

        /* Create the vring */
        align = ops->get_vq_align(vdpa);
        vq = vring_create_virtqueue(index, num, align, vdev,
                                    true, true, ctx,
                                    virtio_vdpa_notify, callback, name);
        if (!vq) {
                err = -ENOMEM;
                goto error_new_virtqueue;
        }

        /* Setup virtqueue callback */
        cb.callback = virtio_vdpa_virtqueue_cb;
        cb.private = info;
        ops->set_vq_cb(vdpa, index, &cb);
        ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));

        /* Tell the backend where the three vring areas live. */
        desc_addr = virtqueue_get_desc_addr(vq);
        driver_addr = virtqueue_get_avail_addr(vq);
        device_addr = virtqueue_get_used_addr(vq);

        if (ops->set_vq_address(vdpa, index,
                                desc_addr, driver_addr,
                                device_addr)) {
                err = -EINVAL;
                goto err_vq;
        }

        ops->set_vq_ready(vdpa, index, 1);

        vq->priv = info;
        info->vq = vq;

        /* Track the queue so callbacks and teardown can find it. */
        spin_lock_irqsave(&vd_dev->lock, flags);
        list_add(&info->node, &vd_dev->virtqueues);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        return vq;

err_vq:
        vring_del_virtqueue(vq);
error_new_virtqueue:
        ops->set_vq_ready(vdpa, index, 0);
        /* VDPA driver should make sure vq is stopped here */
        WARN_ON(ops->get_vq_ready(vdpa, index));
        kfree(info);
        return ERR_PTR(err);
}
 214
/*
 * Tear down one virtqueue: unlink its tracking info, deactivate the
 * backend queue, then free the vring and the info structure.
 */
static void virtio_vdpa_del_vq(struct virtqueue *vq)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
        struct vdpa_device *vdpa = vd_dev->vdpa;
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_vq_info *info = vq->priv;
        unsigned int index = vq->index;
        unsigned long flags;

        spin_lock_irqsave(&vd_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vd_dev->lock, flags);

        /* Select and deactivate the queue */
        ops->set_vq_ready(vdpa, index, 0);
        /* Backend is expected to have quiesced the queue by now. */
        WARN_ON(ops->get_vq_ready(vdpa, index));

        vring_del_virtqueue(vq);

        kfree(info);
}
 236
 237static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
 238{
 239        struct virtqueue *vq, *n;
 240
 241        list_for_each_entry_safe(vq, n, &vdev->vqs, list)
 242                virtio_vdpa_del_vq(vq);
 243}
 244
/*
 * Create the requested virtqueues and install the config-change callback.
 *
 * A NULL entry in names[] means the caller does not want that queue:
 * the matching vqs[] slot is set to NULL and no device queue index is
 * consumed (queue_idx only advances for queues actually created).
 * @desc (IRQ affinity) is not used by this transport.
 *
 * Returns 0 on success or a negative errno; on failure every queue
 * created so far is torn down.
 */
static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                                struct virtqueue *vqs[],
                                vq_callback_t *callbacks[],
                                const char * const names[],
                                const bool *ctx,
                                struct irq_affinity *desc)
{
        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
        const struct vdpa_config_ops *ops = vdpa->config;
        struct vdpa_callback cb;
        int i, err, queue_idx = 0;

        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
                                              callbacks[i], names[i], ctx ?
                                              ctx[i] : false);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto err_setup_vq;
                }
        }

        /* Route config space change notifications to the virtio core. */
        cb.callback = virtio_vdpa_config_cb;
        cb.private = vd_dev;
        ops->set_config_cb(vdpa, &cb);

        return 0;

err_setup_vq:
        /* Only queues already linked into vdev->vqs are torn down. */
        virtio_vdpa_del_vqs(vdev);
        return err;
}
 283
 284static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
 285{
 286        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 287        const struct vdpa_config_ops *ops = vdpa->config;
 288
 289        return ops->get_features(vdpa);
 290}
 291
 292static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
 293{
 294        struct vdpa_device *vdpa = vd_get_vdpa(vdev);
 295
 296        /* Give virtio_ring a chance to accept features. */
 297        vring_transport_features(vdev);
 298
 299        return vdpa_set_features(vdpa, vdev->features);
 300}
 301
 302static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
 303{
 304        struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
 305        struct vdpa_device *vdpa = vd_dev->vdpa;
 306
 307        return dev_name(&vdpa->dev);
 308}
 309
/* virtio transport ops, each forwarding to the backend's vdpa_config_ops. */
static const struct virtio_config_ops virtio_vdpa_config_ops = {
        .get            = virtio_vdpa_get,
        .set            = virtio_vdpa_set,
        .generation     = virtio_vdpa_generation,
        .get_status     = virtio_vdpa_get_status,
        .set_status     = virtio_vdpa_set_status,
        .reset          = virtio_vdpa_reset,
        .find_vqs       = virtio_vdpa_find_vqs,
        .del_vqs        = virtio_vdpa_del_vqs,
        .get_features   = virtio_vdpa_get_features,
        .finalize_features = virtio_vdpa_finalize_features,
        .bus_name       = virtio_vdpa_bus_name,
};
 323
 324static void virtio_vdpa_release_dev(struct device *_d)
 325{
 326        struct virtio_device *vdev =
 327               container_of(_d, struct virtio_device, dev);
 328        struct virtio_vdpa_device *vd_dev =
 329               container_of(vdev, struct virtio_vdpa_device, vdev);
 330
 331        kfree(vd_dev);
 332}
 333
/*
 * vDPA bus probe: wrap the vDPA device in a virtio_device and register
 * it on the virtio bus.
 *
 * Once register_virtio_device() has been called — even when it fails —
 * the embedded struct device owns vd_dev and must be released via
 * put_device() rather than kfree(); reg_dev tracks which case we're in.
 */
static int virtio_vdpa_probe(struct vdpa_device *vdpa)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
        int ret = -EINVAL;

        vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
        if (!vd_dev)
                return -ENOMEM;

        /* DMA goes through the vDPA device's designated DMA device. */
        vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
        vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
        vd_dev->vdev.config = &virtio_vdpa_config_ops;
        vd_dev->vdpa = vdpa;
        INIT_LIST_HEAD(&vd_dev->virtqueues);
        spin_lock_init(&vd_dev->lock);

        /* Device ID 0 is reserved/invalid — refuse to register it. */
        vd_dev->vdev.id.device = ops->get_device_id(vdpa);
        if (vd_dev->vdev.id.device == 0)
                goto err;

        vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
        ret = register_virtio_device(&vd_dev->vdev);
        reg_dev = vd_dev;
        if (ret)
                goto err;

        vdpa_set_drvdata(vdpa, vd_dev);

        return 0;

err:
        if (reg_dev)
                put_device(&vd_dev->vdev.dev);
        else
                kfree(vd_dev);
        return ret;
}
 372
 373static void virtio_vdpa_remove(struct vdpa_device *vdpa)
 374{
 375        struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);
 376
 377        unregister_virtio_device(&vd_dev->vdev);
 378}
 379
/* vDPA bus driver; binds vDPA devices and exposes them as virtio devices. */
static struct vdpa_driver virtio_vdpa_driver = {
        .driver = {
                .name   = "virtio_vdpa",
        },
        .probe  = virtio_vdpa_probe,
        .remove = virtio_vdpa_remove,
};
 387
 388module_vdpa_driver(virtio_vdpa_driver);
 389
 390MODULE_VERSION(MOD_VERSION);
 391MODULE_LICENSE(MOD_LICENSE);
 392MODULE_AUTHOR(MOD_AUTHOR);
 393MODULE_DESCRIPTION(MOD_DESC);
 394