linux/drivers/virtio/virtio_pci_legacy.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - legacy device support
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

/* virtio config->get_features() implementation */
static u64 vp_get_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* When someone needs more than 32 feature bits, we'll need to
         * steal a bit to indicate that the rest are somewhere else. */
        return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
}

/* virtio config->finalize_features() implementation */
static int vp_finalize_features(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* Give virtio_ring a chance to accept features. */
        vring_transport_features(vdev);

        /* Make sure we don't have any features > 32 bits! */
        BUG_ON((u32)vdev->features != vdev->features);

        /* We only support 32 feature bits. */
        iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);

        return 0;
}

/* virtio config->get() implementation */
static void vp_get(struct virtio_device *vdev, unsigned offset,
                   void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                        VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
                        offset;
        u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                ptr[i] = ioread8(ioaddr + i);
}

/* the config->set() implementation.  it's symmetric to the config->get()
 * implementation */
static void vp_set(struct virtio_device *vdev, unsigned offset,
                   const void *buf, unsigned len)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        void __iomem *ioaddr = vp_dev->ioaddr +
                        VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
                        offset;
        const u8 *ptr = buf;
        int i;

        for (i = 0; i < len; i++)
                iowrite8(ptr[i], ioaddr + i);
}

/* config->{get,set}_status() implementations */
static u8 vp_get_status(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

static void vp_set_status(struct virtio_device *vdev, u8 status)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* We should never be setting status to 0. */
        BUG_ON(status == 0);
        iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
}

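/* virtio config->reset() implementation */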
static void vp_reset(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        /* 0 status means a reset. */
        iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush out the status write, and flush in device writes,
         * including MSI-X interrupts, if any. */
        ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
        /* Flush pending VQ/configuration callbacks. */
        vp_synchronize_vectors(vdev);
}

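/* vp_dev->config_vector() implementation: program the MSI-X vector used for
 * configuration change interrupts and return what the device accepted. */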
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
        /* Setup the vector used for configuration events */
        iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
        /* Verify we had enough resources to assign the vector */
        /* Will also flush the write out to device */
        return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
}

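/* vp_dev->setup_vq() implementation: size the ring from VIRTIO_PCI_QUEUE_NUM,
 * allocate it, and hand its page frame number to the device. */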
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
                                  struct virtio_pci_vq_info *info,
                                  unsigned index,
                                  void (*callback)(struct virtqueue *vq),
                                  const char *name,
                                  bool ctx,
                                  u16 msix_vec)
{
        struct virtqueue *vq;
        u16 num;
        int err;
        u64 q_pfn;

        /* Select the queue we're interested in */
        iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        /* Check if queue is either not available or already active. */
        num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
        if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
                return ERR_PTR(-ENOENT);

        info->msix_vector = msix_vec;

        /* create the vring */
        vq = vring_create_virtqueue(index, num,
                                    VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev,
                                    true, false, ctx,
                                    vp_notify, callback, name);
        if (!vq)
                return ERR_PTR(-ENOMEM);

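        /* Legacy devices take the ring address as a 32-bit page frame number. */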
        q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        if (q_pfn >> 32) {
                dev_err(&vp_dev->pci_dev->dev,
                        "platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
                        0x1ULL << (32 + PAGE_SHIFT - 30));
                err = -E2BIG;
                goto out_del_vq;
        }

        /* activate the queue */
        iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

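        /* Remember the notify register address; vp_notify() (from
         * virtio_pci_common.c) writes the queue index there to kick the device. */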
        vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;

        if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
                iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
                        err = -EBUSY;
                        goto out_deactivate;
                }
        }

        return vq;

out_deactivate:
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
out_del_vq:
        vring_del_virtqueue(vq);
        return ERR_PTR(err);
}

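/* vp_dev->del_vq() implementation: clear the queue's MSI-X vector (if any),
 * deactivate the queue by writing a PFN of 0, and free the ring. */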
static void del_vq(struct virtio_pci_vq_info *info)
{
        struct virtqueue *vq = info->vq;
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);

        iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);

        if (vp_dev->msix_enabled) {
                iowrite16(VIRTIO_MSI_NO_VECTOR,
                          vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
                /* Flush the write out to device */
                ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
        }

        /* Select and deactivate the queue */
        iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);

        vring_del_virtqueue(vq);
}

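/* Config ops for the legacy transport; the vq management and interrupt
 * helpers (vp_find_vqs() and friends) are shared with the modern transport
 * via virtio_pci_common.c. */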
static const struct virtio_config_ops virtio_pci_config_ops = {
        .get            = vp_get,
        .set            = vp_set,
        .get_status     = vp_get_status,
        .set_status     = vp_set_status,
        .reset          = vp_reset,
        .find_vqs       = vp_find_vqs,
        .del_vqs        = vp_del_vqs,
        .get_features   = vp_get_features,
        .finalize_features = vp_finalize_features,
        .bus_name       = vp_bus_name,
        .set_vq_affinity = vp_set_vq_affinity,
        .get_vq_affinity = vp_get_vq_affinity,
};

/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
        struct pci_dev *pci_dev = vp_dev->pci_dev;
        int rc;

        /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
                return -ENODEV;

        if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
                printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
                       VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
                return -ENODEV;
        }

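        /* Prefer 64-bit DMA for the data buffers; the ring (coherent memory)
         * must still fit the 32-bit PFN register, so its mask stays narrower. */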
        rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
        if (rc) {
                rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
        } else {
                /*
                 * The virtio ring base address is expressed as a 32-bit PFN,
                 * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
                 */
                dma_set_coherent_mask(&pci_dev->dev,
                                DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
        }

        if (rc)
                dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

        rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
        if (rc)
                return rc;

        rc = -ENOMEM;
        vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
        if (!vp_dev->ioaddr)
                goto err_iomap;

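        /* In the legacy layout the ISR status byte sits at a fixed offset in
         * BAR0; reading it also acknowledges the interrupt. */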
        vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;

        /* we use the subsystem vendor/device id as the virtio vendor/device
         * id.  this allows us to use the same PCI vendor/device id for all
         * virtio devices and to identify the particular virtio driver by
         * the subsystem ids */
        vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
        vp_dev->vdev.id.device = pci_dev->subsystem_device;

        vp_dev->vdev.config = &virtio_pci_config_ops;

        vp_dev->config_vector = vp_config_vector;
        vp_dev->setup_vq = setup_vq;
        vp_dev->del_vq = del_vq;

        return 0;

err_iomap:
        pci_release_region(pci_dev, 0);
        return rc;
}

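/* Undo virtio_pci_legacy_probe(): unmap BAR0 and release the PCI region. */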
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
        struct pci_dev *pci_dev = vp_dev->pci_dev;

        pci_iounmap(pci_dev, vp_dev->ioaddr);
        pci_release_region(pci_dev, 0);
}