linux/drivers/virtio/virtio_pci_modern_dev.c
// SPDX-License-Identifier: GPL-2.0-or-later

#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>

/*
 * vp_modern_map_capability - map a part of a virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability in PCI config space
 * @minlen: minimal required length of the capability
 * @align: alignment requirement for the mapping
 * @start: start offset within the capability
 * @size: maximum number of bytes to map
 * @len: the length that is actually mapped
 * @pa: physical address of the capability
 *
 * Returns the io address for the part of the capability
 */
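/*
 * Worked example (illustrative, not from the virtio spec): a capability
 * advertising offset 0x2000 and length 0x1000, mapped with start 0x10,
 * size 0x20 and align 2, first shrinks length to 0xff0 (length - start),
 * advances the BAR offset to 0x2010, then clamps the mapping to the 0x20
 * bytes requested, so pci_iomap_range() maps 0x20 bytes at 0x2010.
 */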
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
                         size_t minlen, u32 align, u32 start, u32 size,
                         size_t *len, resource_size_t *pa)
{
        struct pci_dev *dev = mdev->pci_dev;
        u8 bar;
        u32 offset, length;
        void __iomem *p;

        pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap,
                                                 bar),
                             &bar);
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
                             &offset);
        pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
                              &length);

        if (length <= start) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>%u expected)\n",
                        length, start);
                return NULL;
        }

        if (length - start < minlen) {
                dev_err(&dev->dev,
                        "virtio_pci: bad capability len %u (>=%zu expected)\n",
                        length, minlen);
                return NULL;
        }

        length -= start;

        if (start + offset < offset) {
                dev_err(&dev->dev,
                        "virtio_pci: map wrap-around %u+%u\n",
                        start, offset);
                return NULL;
        }

        offset += start;

        if (offset & (align - 1)) {
                dev_err(&dev->dev,
                        "virtio_pci: offset %u not aligned to %u\n",
                        offset, align);
                return NULL;
        }

        if (length > size)
                length = size;

        if (len)
                *len = length;

        if (minlen + offset < minlen ||
            minlen + offset > pci_resource_len(dev, bar)) {
                dev_err(&dev->dev,
                        "virtio_pci: map virtio %zu@%u "
                        "out of range on bar %i length %lu\n",
                        minlen, offset,
                        bar, (unsigned long)pci_resource_len(dev, bar));
                return NULL;
        }

        p = pci_iomap_range(dev, bar, offset, length);
        if (!p)
                dev_err(&dev->dev,
                        "virtio_pci: unable to map virtio %u@%u on bar %i\n",
                        length, offset, bar);
        else if (pa)
                *pa = pci_resource_start(dev, bar) + offset;

        return p;
}

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
                                             u32 ioresource_types, int *bars)
{
        int pos;

        for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
             pos > 0;
             pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
                u8 type, bar;
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         cfg_type),
                                     &type);
                pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
                                                         bar),
                                     &bar);

                /* Ignore structures with reserved BAR values */
                if (bar > 0x5)
                        continue;

                if (type == cfg_type) {
                        if (pci_resource_len(dev, bar) &&
                            pci_resource_flags(dev, bar) & ioresource_types) {
                                *bars |= (1 << bar);
                                return pos;
                        }
                }
        }
        return 0;
}

/* This is part of the ABI.  Don't screw with it. */
static inline void check_offsets(void)
{
        /* Note: disk space was harmed in compilation of this function. */
        BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
                     offsetof(struct virtio_pci_cap, cap_vndr));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
                     offsetof(struct virtio_pci_cap, cap_next));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
                     offsetof(struct virtio_pci_cap, cap_len));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
                     offsetof(struct virtio_pci_cap, cfg_type));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
                     offsetof(struct virtio_pci_cap, bar));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
                     offsetof(struct virtio_pci_cap, offset));
        BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
                     offsetof(struct virtio_pci_cap, length));
        BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
                     offsetof(struct virtio_pci_notify_cap,
                              notify_off_multiplier));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
                     offsetof(struct virtio_pci_common_cfg,
                              device_feature_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
                     offsetof(struct virtio_pci_common_cfg, device_feature));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
                     offsetof(struct virtio_pci_common_cfg,
                              guest_feature_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
                     offsetof(struct virtio_pci_common_cfg, guest_feature));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
                     offsetof(struct virtio_pci_common_cfg, msix_config));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
                     offsetof(struct virtio_pci_common_cfg, num_queues));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
                     offsetof(struct virtio_pci_common_cfg, device_status));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
                     offsetof(struct virtio_pci_common_cfg, config_generation));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
                     offsetof(struct virtio_pci_common_cfg, queue_select));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
                     offsetof(struct virtio_pci_common_cfg, queue_size));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
                     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
                     offsetof(struct virtio_pci_common_cfg, queue_enable));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
                     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
                     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
        BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
                     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe - probe the modern virtio pci device; note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Returns 0 on success, a negative error code on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
        struct pci_dev *pci_dev = mdev->pci_dev;
        int err, common, isr, notify, device;
        u32 notify_length;
        u32 notify_offset;

        check_offsets();

        mdev->pci_dev = pci_dev;

        /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
        if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
                return -ENODEV;

        if (pci_dev->device < 0x1040) {
                /* Transitional devices: use the PCI subsystem device id as
                 * virtio device id, same as legacy driver always did.
                 */
                mdev->id.device = pci_dev->subsystem_device;
        } else {
                /* Modern devices: simply use PCI device id, but start from 0x1040. */
                mdev->id.device = pci_dev->device - 0x1040;
        }
        mdev->id.vendor = pci_dev->subsystem_vendor;

        /* check for a common config: if not, use legacy mode (bar 0). */
        common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);
        if (!common) {
                dev_info(&pci_dev->dev,
                         "virtio_pci: leaving for legacy driver\n");
                return -ENODEV;
        }

        /* If common is there, these should be too... */
        isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
                                         IORESOURCE_IO | IORESOURCE_MEM,
                                         &mdev->modern_bars);
        notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);
        if (!isr || !notify) {
                dev_err(&pci_dev->dev,
                        "virtio_pci: missing capabilities %i/%i/%i\n",
                        common, isr, notify);
                return -EINVAL;
        }

        err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
        if (err)
                err = dma_set_mask_and_coherent(&pci_dev->dev,
                                                DMA_BIT_MASK(32));
        if (err)
                dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA.  Trying to continue, but this might not work.\n");

        /* Device capability is only mandatory for devices that have
         * device-specific configuration.
         */
        device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
                                            IORESOURCE_IO | IORESOURCE_MEM,
                                            &mdev->modern_bars);

        err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
                                           "virtio-pci-modern");
        if (err)
                return err;

        err = -EINVAL;
        mdev->common = vp_modern_map_capability(mdev, common,
                                      sizeof(struct virtio_pci_common_cfg), 4,
                                      0, sizeof(struct virtio_pci_common_cfg),
                                      NULL, NULL);
        if (!mdev->common)
                goto err_map_common;
        mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
                                             0, 1,
                                             NULL, NULL);
        if (!mdev->isr)
                goto err_map_isr;

        /* Read notify_off_multiplier from config space. */
        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                notify_off_multiplier),
                              &mdev->notify_offset_multiplier);
        /* Read notify length and offset from config space. */
        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                cap.length),
                              &notify_length);

        pci_read_config_dword(pci_dev,
                              notify + offsetof(struct virtio_pci_notify_cap,
                                                cap.offset),
                              &notify_offset);

        /* We don't know how many VQs we'll map, ahead of time.
         * If notify length is small, map it all now.
         * Otherwise, map each VQ individually later.
         */
        if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
                mdev->notify_base = vp_modern_map_capability(mdev, notify,
                                                             2, 2,
                                                             0, notify_length,
                                                             &mdev->notify_len,
                                                             &mdev->notify_pa);
                if (!mdev->notify_base)
                        goto err_map_notify;
        } else {
                mdev->notify_map_cap = notify;
        }

        /* Again, we don't know how much we should map, but PAGE_SIZE
         * is more than enough for all existing devices.
         */
        if (device) {
                mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
                                                        0, PAGE_SIZE,
                                                        &mdev->device_len,
                                                        NULL);
                if (!mdev->device)
                        goto err_map_device;
        }

        return 0;

err_map_device:
        if (mdev->notify_base)
                pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
        pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
        pci_iounmap(pci_dev, mdev->common);
err_map_common:
        return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);
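
/*
 * Example (illustrative sketch, not part of this file): a PCI driver's
 * probe routine would typically enable the device first and then hand
 * the container over to this helper; the error label is hypothetical.
 *
 *	mdev->pci_dev = pci_dev;
 *	err = pci_enable_device(pci_dev);
 *	if (err)
 *		return err;
 *	err = vp_modern_probe(mdev);
 *	if (err)
 *		goto err_disable_device;
 */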

/*
 * vp_modern_remove - remove and cleanup the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
        struct pci_dev *pci_dev = mdev->pci_dev;

        if (mdev->device)
                pci_iounmap(pci_dev, mdev->device);
        if (mdev->notify_base)
                pci_iounmap(pci_dev, mdev->notify_base);
        pci_iounmap(pci_dev, mdev->isr);
        pci_iounmap(pci_dev, mdev->common);
        pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
        u64 features;

        vp_iowrite32(0, &cfg->device_feature_select);
        features = vp_ioread32(&cfg->device_feature);
        vp_iowrite32(1, &cfg->device_feature_select);
        features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

        return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);
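
/*
 * Example (illustrative sketch): a typical caller masks the device's
 * feature bits with what the driver supports before acking them;
 * driver_features is a hypothetical driver-side mask.
 *
 *	u64 features = vp_modern_get_features(mdev);
 *
 *	if (!(features & BIT_ULL(VIRTIO_F_VERSION_1)))
 *		return -ENODEV;
 *	vp_modern_set_features(mdev, features & driver_features);
 */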

/*
 * vp_modern_get_driver_features - get driver features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read from the device
 */
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
        u64 features;

        vp_iowrite32(0, &cfg->guest_feature_select);
        features = vp_ioread32(&cfg->guest_feature);
        vp_iowrite32(1, &cfg->guest_feature_select);
        features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

        return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features set to device
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
                            u64 features)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite32(0, &cfg->guest_feature_select);
        vp_iowrite32((u32)features, &cfg->guest_feature);
        vp_iowrite32(1, &cfg->guest_feature_select);
        vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);

/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);
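
/*
 * Example (illustrative sketch): per the virtio spec, readers of
 * multi-byte config fields can use the generation counter to detect a
 * concurrent device-side update and retry:
 *
 *	u32 gen;
 *
 *	do {
 *		gen = vp_modern_generation(mdev);
 *		... read the configuration fields of interest ...
 *	} while (gen != vp_modern_generation(mdev));
 */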

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status set to device
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
                          u8 status)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);
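
/*
 * Example (illustrative sketch): the virtio initialization sequence
 * drives device_status through the bits defined in
 * <uapi/linux/virtio_config.h>, accumulating them step by step:
 *
 *	u8 status = 0;
 *
 *	vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_ACKNOWLEDGE);
 *	vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_DRIVER);
 *	... negotiate features ...
 *	vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_FEATURES_OK);
 *	if (!(vp_modern_get_status(mdev) & VIRTIO_CONFIG_S_FEATURES_OK))
 *		return -ENODEV;
 *	vp_modern_set_status(mdev, status |= VIRTIO_CONFIG_S_DRIVER_OK);
 */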

/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the MSIX vector to use for this virtqueue
 *
 * Returns the vector read back from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
                           u16 index, u16 vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite16(index, &cfg->queue_select);
        vp_iowrite16(vector, &cfg->queue_msix_vector);
        /* Flush the write out to device */
        return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);
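
/*
 * Example: since the device echoes the vector back, callers detect an
 * out-of-resources condition by comparing the result against
 * VIRTIO_MSI_NO_VECTOR:
 *
 *	if (vp_modern_queue_vector(mdev, index, vector) == VIRTIO_MSI_NO_VECTOR)
 *		return -EBUSY;
 */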

/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
                            u16 vector)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        /* Setup the vector used for configuration events */
        vp_iowrite16(vector, &cfg->msix_config);
        /* Verify we had enough resources to assign the vector */
        /* Will also flush the write out to device */
        return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
                             u16 index, u64 desc_addr, u64 driver_addr,
                             u64 device_addr)
{
        struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

        vp_iowrite16(index, &cfg->queue_select);

        vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
                             &cfg->queue_desc_hi);
        vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
                             &cfg->queue_avail_hi);
        vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
                             &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);

/*
 * vp_modern_set_queue_enable - enable/disable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether the virtqueue should be enabled
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
                                u16 index, bool enable)
{
        vp_iowrite16(index, &mdev->common->queue_select);
        vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);
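
/*
 * Example (illustrative sketch): a virtqueue is typically brought up by
 * programming its size and ring addresses before the final enable write;
 * num and the *_pa ring addresses are hypothetical values from the
 * driver's vring allocator.
 *
 *	vp_modern_set_queue_size(mdev, index, num);
 *	vp_modern_queue_address(mdev, index, desc_pa, avail_pa, used_pa);
 *	vp_modern_set_queue_enable(mdev, index, true);
 */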

/*
 * vp_modern_get_queue_enable - get the enable status of a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether a virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
                                u16 index)
{
        vp_iowrite16(index, &mdev->common->queue_select);

        return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
                              u16 index, u16 size)
{
        vp_iowrite16(index, &mdev->common->queue_select);
        vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
                             u16 index)
{
        vp_iowrite16(index, &mdev->common->queue_select);

        return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
        return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
                                          u16 index)
{
        vp_iowrite16(index, &mdev->common->queue_select);

        return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map the notification area for a specific
 * virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: the pointer to the physical address of the notify area
 *
 * Returns the address of the notification area
 */
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
                                      u16 index, resource_size_t *pa)
{
        u16 off = vp_modern_get_queue_notify_off(mdev, index);

        if (mdev->notify_base) {
                /* offset should not wrap */
                if ((u64)off * mdev->notify_offset_multiplier + 2
                        > mdev->notify_len) {
                        dev_warn(&mdev->pci_dev->dev,
                                 "bad notification offset %u (x %u) "
                                 "for queue %u > %zd",
                                 off, mdev->notify_offset_multiplier,
                                 index, mdev->notify_len);
                        return NULL;
                }
                if (pa)
                        *pa = mdev->notify_pa +
                              off * mdev->notify_offset_multiplier;
                return mdev->notify_base + off * mdev->notify_offset_multiplier;
        } else {
                return vp_modern_map_capability(mdev,
                                       mdev->notify_map_cap, 2, 2,
                                       off * mdev->notify_offset_multiplier, 2,
                                       NULL, pa);
        }
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);
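
/*
 * Example (illustrative sketch): a driver notifies ("kicks") a virtqueue
 * by writing the queue index to the mapped notification address; vq_index
 * and notify are hypothetical driver-side names.
 *
 *	void __iomem *notify = vp_modern_map_vq_notify(mdev, vq_index, NULL);
 *
 *	if (notify)
 *		vp_iowrite16(vq_index, notify);
 */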

MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
MODULE_LICENSE("GPL");