linux/drivers/vdpa/virtio_pci/vp_vdpa.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vDPA bridge driver for modern virtio-pci device
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 * Author: Jason Wang <jasowang@redhat.com>
 *
 * Based on virtio_pci_modern.c.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vdpa.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/virtio_pci_modern.h>

#define VP_VDPA_QUEUE_MAX 256
#define VP_VDPA_DRIVER_NAME "vp_vdpa"
#define VP_VDPA_NAME_SIZE 256

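/*
 * Per-virtqueue state: the mapped notification (doorbell) address and its
 * physical address, the MSI-X name and IRQ used for this queue, and the
 * callback registered by the vDPA bus driver.
 */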
struct vp_vring {
        void __iomem *notify;
        char msix_name[VP_VDPA_NAME_SIZE];
        struct vdpa_callback cb;
        resource_size_t notify_pa;
        int irq;
};

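/*
 * Driver private state: the embedded vDPA device, the modern virtio-pci
 * transport state, one vp_vring per virtqueue, the config-change callback
 * and IRQ, and the number of queues and allocated MSI-X vectors.
 */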
struct vp_vdpa {
        struct vdpa_device vdpa;
        struct virtio_pci_modern_device mdev;
        struct vp_vring *vring;
        struct vdpa_callback config_cb;
        char msix_name[VP_VDPA_NAME_SIZE];
        int config_irq;
        int queues;
        int vectors;
};

static struct vp_vdpa *vdpa_to_vp(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vp_vdpa, vdpa);
}

static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

        return &vp_vdpa->mdev;
}

static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return vp_modern_get_features(mdev);
}

static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        vp_modern_set_features(mdev, features);

        return 0;
}

static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return vp_modern_get_status(mdev);
}

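/*
 * Detach all virtqueues and the config space from their MSI-X vectors,
 * free the per-queue and config IRQs, and release the vectors themselves.
 * Safe to call on a partially set up device: entries still holding
 * VIRTIO_MSI_NO_VECTOR are skipped.
 */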
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        struct pci_dev *pdev = mdev->pci_dev;
        int i;

        for (i = 0; i < vp_vdpa->queues; i++) {
                if (vp_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
                        vp_modern_queue_vector(mdev, i, VIRTIO_MSI_NO_VECTOR);
                        devm_free_irq(&pdev->dev, vp_vdpa->vring[i].irq,
                                      &vp_vdpa->vring[i]);
                        vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
                }
        }

        if (vp_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
                vp_modern_config_vector(mdev, VIRTIO_MSI_NO_VECTOR);
                devm_free_irq(&pdev->dev, vp_vdpa->config_irq, vp_vdpa);
                vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
        }

        if (vp_vdpa->vectors) {
                pci_free_irq_vectors(pdev);
                vp_vdpa->vectors = 0;
        }
}

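/*
 * MSI-X interrupt handlers: relay the hardware interrupt to the callback
 * that the vDPA bus driver registered for the virtqueue or for config
 * space changes, if one is set.
 */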
static irqreturn_t vp_vdpa_vq_handler(int irq, void *arg)
{
        struct vp_vring *vring = arg;

        if (vring->cb.callback)
                return vring->cb.callback(vring->cb.private);

        return IRQ_HANDLED;
}

static irqreturn_t vp_vdpa_config_handler(int irq, void *arg)
{
        struct vp_vdpa *vp_vdpa = arg;

        if (vp_vdpa->config_cb.callback)
                return vp_vdpa->config_cb.callback(vp_vdpa->config_cb.private);

        return IRQ_HANDLED;
}

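/*
 * Allocate one MSI-X vector per virtqueue plus one for config changes,
 * request an IRQ for each, and program the device so that vector i serves
 * virtqueue i and the last vector serves the config space. On any failure
 * everything requested so far is torn down via vp_vdpa_free_irq().
 */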
static int vp_vdpa_request_irq(struct vp_vdpa *vp_vdpa)
{
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        struct pci_dev *pdev = mdev->pci_dev;
        int i, ret, irq;
        int queues = vp_vdpa->queues;
        int vectors = queues + 1;

        ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
        if (ret != vectors) {
                dev_err(&pdev->dev,
                        "vp_vdpa: fail to allocate irq vectors, want %d but got %d\n",
                        vectors, ret);
                return ret;
        }

        vp_vdpa->vectors = vectors;

        for (i = 0; i < queues; i++) {
                snprintf(vp_vdpa->vring[i].msix_name, VP_VDPA_NAME_SIZE,
                        "vp-vdpa[%s]-%d", pci_name(pdev), i);
                irq = pci_irq_vector(pdev, i);
                ret = devm_request_irq(&pdev->dev, irq,
                                       vp_vdpa_vq_handler,
                                       0, vp_vdpa->vring[i].msix_name,
                                       &vp_vdpa->vring[i]);
                if (ret) {
                        dev_err(&pdev->dev,
                                "vp_vdpa: fail to request irq for vq %d\n", i);
                        goto err;
                }
                vp_modern_queue_vector(mdev, i, i);
                vp_vdpa->vring[i].irq = irq;
        }

        snprintf(vp_vdpa->msix_name, VP_VDPA_NAME_SIZE, "vp-vdpa[%s]-config",
                 pci_name(pdev));
        irq = pci_irq_vector(pdev, queues);
        ret = devm_request_irq(&pdev->dev, irq, vp_vdpa_config_handler, 0,
                               vp_vdpa->msix_name, vp_vdpa);
        if (ret) {
                dev_err(&pdev->dev,
                        "vp_vdpa: fail to request irq for config\n");
                goto err;
        }
        vp_modern_config_vector(mdev, queues);
        vp_vdpa->config_irq = irq;

        return 0;
err:
        vp_vdpa_free_irq(vp_vdpa);
        return ret;
}

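/*
 * Interrupt setup is deferred until the driver sets DRIVER_OK, and the
 * interrupts are released again when DRIVER_OK is cleared (i.e. on reset),
 * mirroring the device lifecycle defined by the virtio spec.
 */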
static void vp_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        u8 s = vp_vdpa_get_status(vdpa);

        if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
            !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
                vp_vdpa_request_irq(vp_vdpa);
        }

        vp_modern_set_status(mdev, status);

        if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
            (s & VIRTIO_CONFIG_S_DRIVER_OK))
                vp_vdpa_free_irq(vp_vdpa);
}

static u16 vp_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VP_VDPA_QUEUE_MAX;
}

static int vp_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
                                struct vdpa_vq_state *state)
{
        /* Note that this is not supported by the virtio specification, so
         * we return -EOPNOTSUPP here. This means we can't support live
         * migration or vhost device start/stop.
         */
        return -EOPNOTSUPP;
}

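/*
 * The virtio specification provides no way to restore arbitrary ring
 * state, so setting a state is only accepted when it matches the state a
 * freshly reset device would have anyway: avail index 0 for split rings,
 * wrap counters 1 and indices 0 for packed rings.
 */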
static int vp_vdpa_set_vq_state_split(struct vdpa_device *vdpa,
                                      const struct vdpa_vq_state *state)
{
        const struct vdpa_vq_state_split *split = &state->split;

        if (split->avail_index == 0)
                return 0;

        return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state_packed(struct vdpa_device *vdpa,
                                       const struct vdpa_vq_state *state)
{
        const struct vdpa_vq_state_packed *packed = &state->packed;

        if (packed->last_avail_counter == 1 &&
            packed->last_avail_idx == 0 &&
            packed->last_used_counter == 1 &&
            packed->last_used_idx == 0)
                return 0;

        return -EOPNOTSUPP;
}

static int vp_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
                                const struct vdpa_vq_state *state)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        /* Note that this is not supported by the virtio specification.
         * But if the state is by chance equal to the device initial
         * state, we can let it go.
         */
        if ((vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK) &&
            !vp_modern_get_queue_enable(mdev, qid)) {
                if (vp_modern_get_driver_features(mdev) &
                    BIT_ULL(VIRTIO_F_RING_PACKED))
                        return vp_vdpa_set_vq_state_packed(vdpa, state);
                else
                        return vp_vdpa_set_vq_state_split(vdpa, state);
        }

        return -EOPNOTSUPP;
}

static void vp_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
                              struct vdpa_callback *cb)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

        vp_vdpa->vring[qid].cb = *cb;
}

static void vp_vdpa_set_vq_ready(struct vdpa_device *vdpa,
                                 u16 qid, bool ready)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        vp_modern_set_queue_enable(mdev, qid, ready);
}

static bool vp_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return vp_modern_get_queue_enable(mdev, qid);
}

static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
                               u32 num)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        vp_modern_set_queue_size(mdev, qid, num);
}

static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        vp_modern_queue_address(mdev, qid, desc_area,
                                driver_area, device_area);

        return 0;
}

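/*
 * Kick a virtqueue by writing its index to the queue's notification
 * register, as defined by the modern virtio-pci notification capability.
 */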
static void vp_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

        vp_iowrite16(qid, vp_vdpa->vring[qid].notify);
}

static u32 vp_vdpa_get_generation(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return vp_modern_generation(mdev);
}

static u32 vp_vdpa_get_device_id(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return mdev->id.device;
}

static u32 vp_vdpa_get_vendor_id(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return mdev->id.vendor;
}

static u32 vp_vdpa_get_vq_align(struct vdpa_device *vdpa)
{
        return PAGE_SIZE;
}

static size_t vp_vdpa_get_config_size(struct vdpa_device *vdpa)
{
        struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);

        return mdev->device_len;
}

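/*
 * Read the device config space byte by byte, retrying the whole read if
 * the config generation counter changes in the middle, so the caller
 * always sees a consistent snapshot.
 */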
static void vp_vdpa_get_config(struct vdpa_device *vdpa,
                               unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        u8 old, new;
        u8 *p;
        int i;

        do {
                old = vp_ioread8(&mdev->common->config_generation);
                p = buf;
                for (i = 0; i < len; i++)
                        *p++ = vp_ioread8(mdev->device + offset + i);

                new = vp_ioread8(&mdev->common->config_generation);
        } while (old != new);
}

static void vp_vdpa_set_config(struct vdpa_device *vdpa,
                               unsigned int offset, const void *buf,
                               unsigned int len)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        const u8 *p = buf;
        int i;

        for (i = 0; i < len; i++)
                vp_iowrite8(*p++, mdev->device + offset + i);
}

static void vp_vdpa_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);

        vp_vdpa->config_cb = *cb;
}

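/*
 * Expose the physical address of a virtqueue's notification register so
 * that the vDPA bus driver (e.g. vhost-vdpa) can map the doorbell page
 * directly into userspace. The usable size is the device's notify offset
 * multiplier.
 */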
static struct vdpa_notification_area
vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
{
        struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
        struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
        struct vdpa_notification_area notify;

        notify.addr = vp_vdpa->vring[qid].notify_pa;
        notify.size = mdev->notify_offset_multiplier;

        return notify;
}

static const struct vdpa_config_ops vp_vdpa_ops = {
        .get_features   = vp_vdpa_get_features,
        .set_features   = vp_vdpa_set_features,
        .get_status     = vp_vdpa_get_status,
        .set_status     = vp_vdpa_set_status,
        .get_vq_num_max = vp_vdpa_get_vq_num_max,
        .get_vq_state   = vp_vdpa_get_vq_state,
        .get_vq_notification = vp_vdpa_get_vq_notification,
        .set_vq_state   = vp_vdpa_set_vq_state,
        .set_vq_cb      = vp_vdpa_set_vq_cb,
        .set_vq_ready   = vp_vdpa_set_vq_ready,
        .get_vq_ready   = vp_vdpa_get_vq_ready,
        .set_vq_num     = vp_vdpa_set_vq_num,
        .set_vq_address = vp_vdpa_set_vq_address,
        .kick_vq        = vp_vdpa_kick_vq,
        .get_generation = vp_vdpa_get_generation,
        .get_device_id  = vp_vdpa_get_device_id,
        .get_vendor_id  = vp_vdpa_get_vendor_id,
        .get_vq_align   = vp_vdpa_get_vq_align,
        .get_config_size = vp_vdpa_get_config_size,
        .get_config     = vp_vdpa_get_config,
        .set_config     = vp_vdpa_set_config,
        .set_config_cb  = vp_vdpa_set_config_cb,
};

static void vp_vdpa_free_irq_vectors(void *data)
{
        pci_free_irq_vectors(data);
}

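/*
 * Probe: enable the PCI device, allocate the vDPA device, probe the modern
 * virtio-pci capabilities, map the notification area of every virtqueue
 * and finally register the device on the vDPA bus. Resources are managed
 * via devres where possible; on error the vDPA device reference is dropped.
 */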
static int vp_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct virtio_pci_modern_device *mdev;
        struct device *dev = &pdev->dev;
        struct vp_vdpa *vp_vdpa;
        int ret, i;

        ret = pcim_enable_device(pdev);
        if (ret)
                return ret;

        vp_vdpa = vdpa_alloc_device(struct vp_vdpa, vdpa,
                                    dev, &vp_vdpa_ops, NULL);
        if (IS_ERR(vp_vdpa)) {
                dev_err(dev, "vp_vdpa: Failed to allocate vDPA structure\n");
                return PTR_ERR(vp_vdpa);
        }

        mdev = &vp_vdpa->mdev;
        mdev->pci_dev = pdev;

        ret = vp_modern_probe(mdev);
        if (ret) {
                dev_err(&pdev->dev, "Failed to probe modern PCI device\n");
                goto err;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, vp_vdpa);

        vp_vdpa->vdpa.dma_dev = &pdev->dev;
        vp_vdpa->queues = vp_modern_get_num_queues(mdev);

        ret = devm_add_action_or_reset(dev, vp_vdpa_free_irq_vectors, pdev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Failed to add devres for freeing irq vectors\n");
                goto err;
        }

        vp_vdpa->vring = devm_kcalloc(&pdev->dev, vp_vdpa->queues,
                                      sizeof(*vp_vdpa->vring),
                                      GFP_KERNEL);
        if (!vp_vdpa->vring) {
                ret = -ENOMEM;
                dev_err(&pdev->dev, "Failed to allocate virtqueues\n");
                goto err;
        }

        for (i = 0; i < vp_vdpa->queues; i++) {
                vp_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
                vp_vdpa->vring[i].notify =
                        vp_modern_map_vq_notify(mdev, i,
                                                &vp_vdpa->vring[i].notify_pa);
                if (!vp_vdpa->vring[i].notify) {
                        ret = -EINVAL;
                        dev_warn(&pdev->dev, "Failed to map vq notify %d\n", i);
                        goto err;
                }
        }
        vp_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;

        ret = vdpa_register_device(&vp_vdpa->vdpa, vp_vdpa->queues);
        if (ret) {
                dev_err(&pdev->dev, "Failed to register to vdpa bus\n");
                goto err;
        }

        return 0;

err:
        put_device(&vp_vdpa->vdpa.dev);
        return ret;
}

static void vp_vdpa_remove(struct pci_dev *pdev)
{
        struct vp_vdpa *vp_vdpa = pci_get_drvdata(pdev);

        vdpa_unregister_device(&vp_vdpa->vdpa);
        vp_modern_remove(&vp_vdpa->mdev);
}

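/*
 * No static PCI ID table: the driver only binds to devices whose IDs are
 * added dynamically (e.g. via the new_id sysfs interface or
 * driver_override), so it never claims a virtio device unless explicitly
 * asked to.
 */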
static struct pci_driver vp_vdpa_driver = {
        .name           = "vp-vdpa",
        .id_table       = NULL, /* only dynamic ids */
        .probe          = vp_vdpa_probe,
        .remove         = vp_vdpa_remove,
};

module_pci_driver(vp_vdpa_driver);

MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_DESCRIPTION("vp-vdpa");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");