/* linux/drivers/virtio/virtio_mmio.c */
   1/*
   2 * Virtio memory mapped device driver
   3 *
   4 * Copyright 2011-2014, ARM Ltd.
   5 *
   6 * This module allows virtio devices to be used over a virtual, memory mapped
   7 * platform device.
   8 *
   9 * The guest device(s) may be instantiated in one of three equivalent ways:
  10 *
  11 * 1. Static platform device in board's code, eg.:
  12 *
  13 *      static struct platform_device v2m_virtio_device = {
  14 *              .name = "virtio-mmio",
  15 *              .id = -1,
  16 *              .num_resources = 2,
  17 *              .resource = (struct resource []) {
  18 *                      {
  19 *                              .start = 0x1001e000,
  20 *                              .end = 0x1001e0ff,
  21 *                              .flags = IORESOURCE_MEM,
  22 *                      }, {
  23 *                              .start = 42 + 32,
  24 *                              .end = 42 + 32,
  25 *                              .flags = IORESOURCE_IRQ,
  26 *                      },
  27 *              }
  28 *      };
  29 *
  30 * 2. Device Tree node, eg.:
  31 *
  32 *              virtio_block@1e000 {
  33 *                      compatible = "virtio,mmio";
  34 *                      reg = <0x1e000 0x100>;
  35 *                      interrupts = <42>;
  36 *              }
  37 *
  38 * 3. Kernel module (or command line) parameter. Can be used more than once -
  39 *    one device will be created for each one. Syntax:
  40 *
  41 *              [virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
  42 *    where:
  43 *              <size>     := size (can use standard suffixes like K, M or G)
  44 *              <baseaddr> := physical base address
  45 *              <irq>      := interrupt number (as passed to request_irq())
  46 *              <id>       := (optional) platform device id
  47 *    eg.:
  48 *              virtio_mmio.device=0x100@0x100b0000:48 \
  49 *                              virtio_mmio.device=1K@0x1001e000:74
  50 *
  51 *
  52 *
  53 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
  54 *
  55 * This work is licensed under the terms of the GNU GPL, version 2 or later.
  56 * See the COPYING file in the top-level directory.
  57 */
  58
  59#define pr_fmt(fmt) "virtio-mmio: " fmt
  60
  61#include <linux/highmem.h>
  62#include <linux/interrupt.h>
  63#include <linux/io.h>
  64#include <linux/list.h>
  65#include <linux/module.h>
  66#include <linux/platform_device.h>
  67#include <linux/slab.h>
  68#include <linux/spinlock.h>
  69#include <linux/virtio.h>
  70#include <linux/virtio_config.h>
  71#include <linux/virtio_mmio.h>
  72#include <linux/virtio_ring.h>
  73
  74
  75
/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



/* Recover the transport-private state from the embedded virtio_device
 * handed to us by the virtio core. */
#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)
  84
/* Per-device state for one memory mapped virtio transport instance. */
struct virtio_mmio_device {
	struct virtio_device vdev;	/* device handed to the virtio core */
	struct platform_device *pdev;	/* backing platform device (MMIO region, IRQ) */

	void __iomem *base;		/* ioremapped register window */
	unsigned long version;		/* VIRTIO_MMIO_VERSION register value (1 = legacy, 2 = modern) */

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;		/* protects @virtqueues */
	struct list_head virtqueues;
};
  96
/* Per-virtqueue bookkeeping, linked into virtio_mmio_device.virtqueues
 * so the interrupt handler can find every active queue. */
struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};
 104
 105
 106
 107/* Configuration interface */
 108
 109static u64 vm_get_features(struct virtio_device *vdev)
 110{
 111        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 112        u64 features;
 113
 114        writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
 115        features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
 116        features <<= 32;
 117
 118        writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
 119        features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
 120
 121        return features;
 122}
 123
/*
 * Negotiate features: let virtio_ring filter the transport bits, verify
 * the version-2 VIRTIO_F_VERSION_1 requirement, then write the accepted
 * feature bits back to the device (upper half first, then lower).
 * Returns 0 on success or -EINVAL for an illegal device/feature mix.
 */
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices: a version 2 device must
	 * offer (and we must accept) VIRTIO_F_VERSION_1. */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	/* Upper 32 accepted feature bits... */
	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	/* ...then the lower 32 bits. */
	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}
 148
 149static void vm_get(struct virtio_device *vdev, unsigned offset,
 150                   void *buf, unsigned len)
 151{
 152        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 153        void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
 154        u8 b;
 155        __le16 w;
 156        __le32 l;
 157
 158        if (vm_dev->version == 1) {
 159                u8 *ptr = buf;
 160                int i;
 161
 162                for (i = 0; i < len; i++)
 163                        ptr[i] = readb(base + offset + i);
 164                return;
 165        }
 166
 167        switch (len) {
 168        case 1:
 169                b = readb(base + offset);
 170                memcpy(buf, &b, sizeof b);
 171                break;
 172        case 2:
 173                w = cpu_to_le16(readw(base + offset));
 174                memcpy(buf, &w, sizeof w);
 175                break;
 176        case 4:
 177                l = cpu_to_le32(readl(base + offset));
 178                memcpy(buf, &l, sizeof l);
 179                break;
 180        case 8:
 181                l = cpu_to_le32(readl(base + offset));
 182                memcpy(buf, &l, sizeof l);
 183                l = cpu_to_le32(ioread32(base + offset + sizeof l));
 184                memcpy(buf + sizeof l, &l, sizeof l);
 185                break;
 186        default:
 187                BUG();
 188        }
 189}
 190
/*
 * Write @len bytes from @buf into device config space at @offset.
 *
 * Legacy (version 1) devices are accessed byte-by-byte; version 2
 * devices require naturally-sized little-endian accesses, with 64-bit
 * fields written as two 32-bit halves (low half first).
 */
static void vm_set(struct virtio_device *vdev, unsigned offset,
		   const void *buf, unsigned len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		/* Config accessors only ever use 1/2/4/8 byte fields. */
		BUG();
	}
}
 233
 234static u32 vm_generation(struct virtio_device *vdev)
 235{
 236        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 237
 238        if (vm_dev->version == 1)
 239                return 0;
 240        else
 241                return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
 242}
 243
 244static u8 vm_get_status(struct virtio_device *vdev)
 245{
 246        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 247
 248        return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
 249}
 250
/* Write the device status byte; status 0 is reserved for vm_reset(). */
static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}
 260
/* Reset the device by writing status 0. */
static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}
 268
 269
 270
 271/* Transport interface */
 272
/* The notify function used when creating a virt queue: kick the device
 * so it processes new buffers on the given queue. Always reports
 * success. */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}
 283
/*
 * Notify all virtqueues on an interrupt.
 *
 * Shared handler for the device's single IRQ line: read the interrupt
 * status, acknowledge it, then dispatch config-change and/or vring
 * processing according to which status bits were set.
 */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		/* The status does not say which queue fired: poll them all. */
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}
 311
 312
 313
/*
 * Remove one virtqueue: unlink it from the IRQ dispatch list, tell the
 * device to stop using it, then free the ring and its bookkeeping.
 */
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		/* Legacy: writing PFN 0 releases the queue. */
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}
 338
/* Tear down every virtqueue of the device, then release the shared IRQ
 * acquired in vm_find_vqs(). */
static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}
 349
/*
 * Create and activate virtqueue @index of @vdev.
 *
 * Selects the queue, checks that the device does not already consider
 * it set up, allocates the ring via vring_create_virtqueue(), and
 * programs its location into the device: a page-frame number for legacy
 * (version 1) devices, or split 64-bit DESC/AVAIL/USED addresses plus
 * QUEUE_READY for version 2 devices. On success the queue is added to
 * the per-device list used for IRQ dispatch.
 *
 * Returns the new virtqueue, NULL when @name is NULL (caller does not
 * want this queue), or an ERR_PTR on failure.
 */
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
				  void (*callback)(struct virtqueue *vq),
				  const char *name)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	/* A maximum queue size of 0 means this queue is unavailable. */
	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				 true, true, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		/* Legacy interface: one contiguous ring identified by PFN. */
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(virtqueue_get_desc_addr(vq) >> PAGE_SHIFT,
				vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		/* Modern interface: 64-bit ring part addresses written as
		 * low/high register pairs, then mark the queue ready. */
		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	/* Make the queue visible to the interrupt handler. */
	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_new_virtqueue:
	/* Deactivate whatever the device may have latched for this queue. */
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}
 443
 444static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 445                       struct virtqueue *vqs[],
 446                       vq_callback_t *callbacks[],
 447                       const char * const names[])
 448{
 449        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 450        unsigned int irq = platform_get_irq(vm_dev->pdev, 0);
 451        int i, err;
 452
 453        err = request_irq(irq, vm_interrupt, IRQF_SHARED,
 454                        dev_name(&vdev->dev), vm_dev);
 455        if (err)
 456                return err;
 457
 458        for (i = 0; i < nvqs; ++i) {
 459                vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]);
 460                if (IS_ERR(vqs[i])) {
 461                        vm_del_vqs(vdev);
 462                        return PTR_ERR(vqs[i]);
 463                }
 464        }
 465
 466        return 0;
 467}
 468
 469static const char *vm_bus_name(struct virtio_device *vdev)
 470{
 471        struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
 472
 473        return vm_dev->pdev->name;
 474}
 475
/* Transport callbacks handed to the virtio core for every device
 * probed by this driver. */
static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
};
 489
 490
 491
 492/* Platform device */
 493
/*
 * Probe one "virtio-mmio" platform device: claim and map its register
 * window, sanity-check the magic value and version, read the device and
 * vendor IDs, and register the resulting device with the virtio core.
 * All allocations are devm-managed, so error paths need no cleanup.
 */
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	struct resource *mem;
	unsigned long magic;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
			resource_size(mem), pdev->name))
		return -EBUSY;

	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return  -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (vm_dev->base == NULL)
		return -EFAULT;

	/* Check magic value (the bytes "virt") */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (memcmp(&magic, "virt", 4) != 0) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version: only 1 (legacy) and 2 are supported. */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	/* Legacy devices need to know the guest page size to interpret PFNs. */
	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	platform_set_drvdata(pdev, vm_dev);

	return register_virtio_device(&vm_dev->vdev);
}
 554
/* Unbind: unregister the virtio device; devm frees everything else. */
static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}
 563
 564
 565
 566/* Devices list parameter */
 567
 568#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)
 569
/* Dummy parent for all devices instantiated via the module parameter. */
static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

/* Non-zero once vm_cmdline_parent has been successfully registered. */
static int vm_cmdline_parent_registered;
/* Next platform device id; overwritten when an id is given explicitly. */
static int vm_cmdline_id;
 576
 577static int vm_cmdline_set(const char *device,
 578                const struct kernel_param *kp)
 579{
 580        int err;
 581        struct resource resources[2] = {};
 582        char *str;
 583        long long int base, size;
 584        unsigned int irq;
 585        int processed, consumed = 0;
 586        struct platform_device *pdev;
 587
 588        /* Consume "size" part of the command line parameter */
 589        size = memparse(device, &str);
 590
 591        /* Get "@<base>:<irq>[:<id>]" chunks */
 592        processed = sscanf(str, "@%lli:%u%n:%d%n",
 593                        &base, &irq, &consumed,
 594                        &vm_cmdline_id, &consumed);
 595
 596        /*
 597         * sscanf() must processes at least 2 chunks; also there
 598         * must be no extra characters after the last chunk, so
 599         * str[consumed] must be '\0'
 600         */
 601        if (processed < 2 || str[consumed])
 602                return -EINVAL;
 603
 604        resources[0].flags = IORESOURCE_MEM;
 605        resources[0].start = base;
 606        resources[0].end = base + size - 1;
 607
 608        resources[1].flags = IORESOURCE_IRQ;
 609        resources[1].start = resources[1].end = irq;
 610
 611        if (!vm_cmdline_parent_registered) {
 612                err = device_register(&vm_cmdline_parent);
 613                if (err) {
 614                        pr_err("Failed to register parent device!\n");
 615                        return err;
 616                }
 617                vm_cmdline_parent_registered = 1;
 618        }
 619
 620        pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
 621                       vm_cmdline_id,
 622                       (unsigned long long)resources[0].start,
 623                       (unsigned long long)resources[0].end,
 624                       (int)resources[1].start);
 625
 626        pdev = platform_device_register_resndata(&vm_cmdline_parent,
 627                        "virtio-mmio", vm_cmdline_id++,
 628                        resources, ARRAY_SIZE(resources), NULL, 0);
 629        if (IS_ERR(pdev))
 630                return PTR_ERR(pdev);
 631
 632        return 0;
 633}
 634
 635static int vm_cmdline_get_device(struct device *dev, void *data)
 636{
 637        char *buffer = data;
 638        unsigned int len = strlen(buffer);
 639        struct platform_device *pdev = to_platform_device(dev);
 640
 641        snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
 642                        pdev->resource[0].end - pdev->resource[0].start + 1ULL,
 643                        (unsigned long long)pdev->resource[0].start,
 644                        (unsigned long long)pdev->resource[1].start,
 645                        pdev->id);
 646        return 0;
 647}
 648
/* "get" handler for the "device" module parameter: concatenate one
 * description line per registered command line device. Returns the
 * result length including the terminating NUL. */
static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}
 656
static struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,	/* parse and instantiate one device */
	.get = vm_cmdline_get,	/* list instantiated devices */
};

/* Root-readable "device" parameter: virtio_mmio.device=<size>@<base>:<irq>[:<id>] */
device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);
 663
/* device_for_each_child() callback: unregister one command line
 * instantiated platform device. Always returns 0 to keep iterating. */
static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
 671
 672static void vm_unregister_cmdline_devices(void)
 673{
 674        if (vm_cmdline_parent_registered) {
 675                device_for_each_child(&vm_cmdline_parent, NULL,
 676                                vm_unregister_cmdline_device);
 677                device_unregister(&vm_cmdline_parent);
 678                vm_cmdline_parent_registered = 0;
 679        }
 680}
 681
 682#else
 683
/* Command line devices disabled in Kconfig: nothing to unregister. */
static void vm_unregister_cmdline_devices(void)
{
}
 687
 688#endif
 689
 690/* Platform driver */
 691
/* Device Tree match table: binds this driver to "virtio,mmio" nodes. */
static struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);
 697
 698static struct platform_driver virtio_mmio_driver = {
 699        .probe          = virtio_mmio_probe,
 700        .remove         = virtio_mmio_remove,
 701        .driver         = {
 702                .name   = "virtio-mmio",
 703                .owner  = THIS_MODULE,
 704                .of_match_table = virtio_mmio_match,
 705        },
 706};
 707
/* Module entry point: register the platform driver. */
static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}
 712
/* Module exit: unregister the driver, then remove any devices that
 * were instantiated via the "device" module parameter. */
static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}
 718
 719module_init(virtio_mmio_init);
 720module_exit(virtio_mmio_exit);
 721
 722MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
 723MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
 724MODULE_LICENSE("GPL");
 725