linux/drivers/virtio/virtio_pci_common.c
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(vp_dev->msix_entries[i].vector);
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change?  Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}

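/*
 * Undo everything vp_request_intx() and vp_request_msix_vectors() set up:
 * free the requested irqs first, then the affinity masks, and only then
 * disable MSI-X and release the bookkeeping arrays.
 */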
static void vp_free_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(vp_dev->msix_entries[i].vector, vp_dev);

        for (i = 0; i < vp_dev->msix_vectors; i++)
                if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

                pci_disable_msix(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_entries);
        vp_dev->msix_entries = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
}

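/*
 * Allocate nvectors MSI-X vectors and request the config-change irq.
 * Vector 0 always carries config changes; with !per_vq_vectors a single
 * further vector is shared by all virtqueues, otherwise the caller hands
 * out one vector per callback-bearing vq.  An illustrative layout for a
 * two-queue device with per-vq vectors (queue names are hypothetical):
 *
 *        vector 0: "<dev>-config"
 *        vector 1: "<dev>-rx"
 *        vector 2: "<dev>-tx"
 */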
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries,
                                       GFP_KERNEL);
        if (!vp_dev->msix_entries)
                goto error;
        vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
                                     GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                        GFP_KERNEL))
                        goto error;

        for (i = 0; i < nvectors; ++i)
                vp_dev->msix_entries[i].entry = i;

        err = pci_enable_msix_exact(vp_dev->pci_dev,
                                    vp_dev->msix_entries, nvectors);
        if (err)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(vp_dev->msix_entries[v].vector,
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        v = vp_dev->config_vector(vp_dev, v);
        /* Verify we had enough resources to assign the vector */
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(vp_dev->msix_entries[v].vector,
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        vp_free_vectors(vdev);
        return err;
}

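/* Fall back to a single shared legacy INTx interrupt for everything. */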
static int vp_request_intx(struct virtio_device *vdev)
{
        int err;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt,
                          IRQF_SHARED, dev_name(&vdev->dev), vp_dev);
        if (!err)
                vp_dev->intx_enabled = 1;
        return err;
}

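/*
 * Common bookkeeping around the transport-specific setup_vq(): allocate
 * a virtio_pci_vq_info and track the vq on vp_dev->virtqueues so that the
 * shared-interrupt handlers can find it.  Callback-less vqs never raise
 * interrupts, so they are kept off the list.
 */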
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}

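/* Unlink the vq from the interrupt list and hand it back to the transport. */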
static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        vp_dev->del_vq(info);
        kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        struct virtio_pci_vq_info *info;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                info = vp_dev->vqs[vq->index];
                if (vp_dev->per_vq_vectors &&
                        info->msix_vector != VIRTIO_MSI_NO_VECTOR)
                        free_irq(vp_dev->msix_entries[info->msix_vector].vector,
                                 vq);
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        vp_free_vectors(vdev);
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}

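/*
 * Core of find_vqs: try one interrupt scheme and clean up after ourselves
 * on failure.  The (use_msix, per_vq_vectors) combinations attempted by
 * vp_find_vqs() below are (true, true), then (true, false), then
 * (false, false).
 */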
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                              struct virtqueue *vqs[],
                              vq_callback_t *callbacks[],
                              const char * const names[],
                              bool use_msix,
                              bool per_vq_vectors)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors;

        vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (!use_msix) {
                /* Old style: one normal interrupt for change and all vqs. */
                err = vp_request_intx(vdev);
                if (err)
                        goto error_find;
        } else {
                if (per_vq_vectors) {
                        /* Best option: one for change interrupt, one per vq. */
                        nvectors = 1;
                        for (i = 0; i < nvqs; ++i)
                                if (callbacks[i])
                                        ++nvectors;
                } else {
                        /* Second best: one for change, shared for all vqs. */
                        nvectors = 2;
                }

                err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors);
                if (err)
                        goto error_find;
        }

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                } else if (!callbacks[i] || !vp_dev->msix_enabled)
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(vp_dev->msix_entries[msix_vec].vector,
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err) {
                        vp_del_vq(vqs[i]);
                        goto error_find;
                }
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}

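/*
 * A sketch of a typical caller (hypothetical driver code, not part of
 * this file): the driver passes parallel arrays of callbacks and names,
 * one entry per queue, and gets the virtqueue pointers back in vqs[]:
 *
 *        vq_callback_t *callbacks[] = { rx_done, tx_done };
 *        const char * const names[] = { "rx", "tx" };
 *        struct virtqueue *vqs[2];
 *
 *        err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
 */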
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[],
                vq_callback_t *callbacks[],
                const char * const names[])
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
                                 true, false);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
        return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names,
                                  false, false);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}

/* Set up the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
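/* The mask set here is also exported to userspace via
 * /proc/irq/<n>/affinity_hint, where irq balancers can pick it up. */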
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = vp_dev->msix_entries[info->msix_vector].vector;
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}

#ifdef CONFIG_PM_SLEEP
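/* Suspend: let the virtio core freeze the device, then drop the PCI side. */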
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);

        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif


/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
        struct virtio_device *vdev = dev_to_virtio(_d);
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* As struct device is a kobject, it's not safe to
         * free the memory (including the reference counter itself)
         * until its release callback. */
        kfree(vp_dev);
}

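/*
 * Bind to a device: try the modern (virtio 1.0) transport first and fall
 * back to the legacy one, unless force_legacy reverses that order.
 */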
static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev;
        int rc;

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        pci_set_drvdata(pci_dev, vp_dev);
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* enable the device */
        rc = pci_enable_device(pci_dev);
        if (rc)
                goto err_enable_device;

        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
                if (rc == -ENODEV || rc == -ENOMEM)
                        rc = virtio_pci_modern_probe(vp_dev);
                if (rc)
                        goto err_probe;
        } else {
                rc = virtio_pci_modern_probe(vp_dev);
                if (rc == -ENODEV)
                        rc = virtio_pci_legacy_probe(vp_dev);
                if (rc)
                        goto err_probe;
        }

        pci_set_master(pci_dev);

        rc = register_virtio_device(&vp_dev->vdev);
        if (rc)
                goto err_register;

        return 0;

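/* vp_dev->ioaddr is only set by the legacy transport, so it tells us
 * which probe succeeded and hence which remove to call. */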
err_register:
        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);
err_probe:
        pci_disable_device(pci_dev);
err_enable_device:
        kfree(vp_dev);
        return rc;
}

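/*
 * Hold an extra reference across unregister_virtio_device() so that
 * vp_dev (freed by virtio_pci_release_dev()) stays valid while we tear
 * down the PCI side.
 */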
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);

        unregister_virtio_device(&vp_dev->vdev);

        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);

        pci_disable_device(pci_dev);
        put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");