/* linux/drivers/vfio/platform/vfio_platform_common.c */
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
  14
  15#include <linux/device.h>
  16#include <linux/acpi.h>
  17#include <linux/iommu.h>
  18#include <linux/module.h>
  19#include <linux/mutex.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/slab.h>
  22#include <linux/types.h>
  23#include <linux/uaccess.h>
  24#include <linux/vfio.h>
  25
  26#include "vfio_platform_private.h"
  27
#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

/* A device was ACPI-probed iff vfio_platform_acpi_probe() set ->acpihid. */
#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

/* Reset handlers registered by vfio reset modules, keyed by compat string. */
static LIST_HEAD(reset_list);
/* Serializes reset_list access and the open/release refcount transitions. */
static DEFINE_MUTEX(driver_lock);
  36
  37static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
  38                                        struct module **module)
  39{
  40        struct vfio_platform_reset_node *iter;
  41        vfio_platform_reset_fn_t reset_fn = NULL;
  42
  43        mutex_lock(&driver_lock);
  44        list_for_each_entry(iter, &reset_list, link) {
  45                if (!strcmp(iter->compat, compat) &&
  46                        try_module_get(iter->owner)) {
  47                        *module = iter->owner;
  48                        reset_fn = iter->of_reset;
  49                        break;
  50                }
  51        }
  52        mutex_unlock(&driver_lock);
  53        return reset_fn;
  54}
  55
/*
 * Probe for an ACPI companion of the device.
 *
 * Returns -ENOENT when ACPI is disabled (so the caller can fall back to
 * DT probing), -ENODEV when no ACPI companion device exists, -EINVAL
 * when the companion reports no hardware ID, and 0 on success with
 * vdev->acpihid set.
 */
static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
                                    struct device *dev)
{
        struct acpi_device *adev;

        if (acpi_disabled)
                return -ENOENT;

        adev = ACPI_COMPANION(dev);
        if (!adev) {
                pr_err("VFIO: ACPI companion device not found for %s\n",
                        vdev->name);
                return -ENODEV;
        }

#ifdef CONFIG_ACPI
        /* With !CONFIG_ACPI, acpi_disabled is 1 and we never get here. */
        vdev->acpihid = acpi_device_hid(adev);
#endif
        /* A companion without a _HID is unexpected: warn and fail. */
        return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}
  76
  77static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
  78                                  const char **extra_dbg)
  79{
  80#ifdef CONFIG_ACPI
  81        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
  82        struct device *dev = vdev->device;
  83        acpi_handle handle = ACPI_HANDLE(dev);
  84        acpi_status acpi_ret;
  85
  86        acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
  87        if (ACPI_FAILURE(acpi_ret)) {
  88                if (extra_dbg)
  89                        *extra_dbg = acpi_format_exception(acpi_ret);
  90                return -EINVAL;
  91        }
  92
  93        return 0;
  94#else
  95        return -ENOENT;
  96#endif
  97}
  98
  99static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
 100{
 101#ifdef CONFIG_ACPI
 102        struct device *dev = vdev->device;
 103        acpi_handle handle = ACPI_HANDLE(dev);
 104
 105        return acpi_has_method(handle, "_RST");
 106#else
 107        return false;
 108#endif
 109}
 110
 111static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
 112{
 113        if (VFIO_PLATFORM_IS_ACPI(vdev))
 114                return vfio_platform_acpi_has_reset(vdev);
 115
 116        return vdev->of_reset ? true : false;
 117}
 118
 119static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
 120{
 121        if (VFIO_PLATFORM_IS_ACPI(vdev))
 122                return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;
 123
 124        vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
 125                                                    &vdev->reset_module);
 126        if (!vdev->of_reset) {
 127                request_module("vfio-reset:%s", vdev->compat);
 128                vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
 129                                                        &vdev->reset_module);
 130        }
 131
 132        return vdev->of_reset ? 0 : -ENOENT;
 133}
 134
 135static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
 136{
 137        if (VFIO_PLATFORM_IS_ACPI(vdev))
 138                return;
 139
 140        if (vdev->of_reset)
 141                module_put(vdev->reset_module);
 142}
 143
/*
 * Discover the device's resources and build the vfio region table.
 *
 * Counts resources through the bus-specific get_resource() callback,
 * then records address/size/type and VFIO access flags for each one.
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL for
 * an unsupported resource type or a resource that vanished mid-scan.
 */
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
        int cnt = 0, i;

        /* Resources are indexed densely; the first NULL ends the list. */
        while (vdev->get_resource(vdev, cnt))
                cnt++;

        vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
                                GFP_KERNEL);
        if (!vdev->regions)
                return -ENOMEM;

        for (i = 0; i < cnt;  i++) {
                struct resource *res =
                        vdev->get_resource(vdev, i);

                if (!res)
                        goto err;

                vdev->regions[i].addr = res->start;
                vdev->regions[i].size = resource_size(res);
                vdev->regions[i].flags = 0;

                switch (resource_type(res)) {
                case IORESOURCE_MEM:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
                        vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
                        /* Read-only resources must not advertise WRITE. */
                        if (!(res->flags & IORESOURCE_READONLY))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_WRITE;

                        /*
                         * Only regions addressed with PAGE granularity may be
                         * MMAPed securely.
                         */
                        if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
                                        !(vdev->regions[i].size & ~PAGE_MASK))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_MMAP;

                        break;
                case IORESOURCE_IO:
                        /* PIO regions are reported but not yet accessible. */
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
                        break;
                default:
                        goto err;
                }
        }

        vdev->num_regions = cnt;

        return 0;
err:
        kfree(vdev->regions);
        return -EINVAL;
}
 200
 201static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
 202{
 203        int i;
 204
 205        for (i = 0; i < vdev->num_regions; i++)
 206                iounmap(vdev->regions[i].ioaddr);
 207
 208        vdev->num_regions = 0;
 209        kfree(vdev->regions);
 210}
 211
 212static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
 213                                    const char **extra_dbg)
 214{
 215        if (VFIO_PLATFORM_IS_ACPI(vdev)) {
 216                dev_info(vdev->device, "reset\n");
 217                return vfio_platform_acpi_call_reset(vdev, extra_dbg);
 218        } else if (vdev->of_reset) {
 219                dev_info(vdev->device, "reset\n");
 220                return vdev->of_reset(vdev);
 221        }
 222
 223        dev_warn(vdev->device, "no reset function found!\n");
 224        return -EINVAL;
 225}
 226
/*
 * vfio device release: called when a user reference is dropped.
 *
 * On the final close, reset the device (warning loudly if a required
 * reset fails), release the PM runtime reference taken in open, and
 * tear down region and IRQ state.  driver_lock serializes the refcnt
 * against vfio_platform_open().
 */
static void vfio_platform_release(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;

        mutex_lock(&driver_lock);

        if (!(--vdev->refcnt)) {
                const char *extra_dbg = NULL;
                int ret;

                ret = vfio_platform_call_reset(vdev, &extra_dbg);
                if (ret && vdev->reset_required) {
                        /* Cannot fail the release; warn as loudly as we can. */
                        dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n",
                                 ret, extra_dbg ? extra_dbg : "");
                        WARN_ON(1);
                }
                pm_runtime_put(vdev->device);
                vfio_platform_regions_cleanup(vdev);
                vfio_platform_irq_cleanup(vdev);
        }

        mutex_unlock(&driver_lock);

        /* Balances the try_module_get() in vfio_platform_open(). */
        module_put(vdev->parent_module);
}
 252
 253static int vfio_platform_open(void *device_data)
 254{
 255        struct vfio_platform_device *vdev = device_data;
 256        int ret;
 257
 258        if (!try_module_get(vdev->parent_module))
 259                return -ENODEV;
 260
 261        mutex_lock(&driver_lock);
 262
 263        if (!vdev->refcnt) {
 264                const char *extra_dbg = NULL;
 265
 266                ret = vfio_platform_regions_init(vdev);
 267                if (ret)
 268                        goto err_reg;
 269
 270                ret = vfio_platform_irq_init(vdev);
 271                if (ret)
 272                        goto err_irq;
 273
 274                ret = pm_runtime_get_sync(vdev->device);
 275                if (ret < 0)
 276                        goto err_pm;
 277
 278                ret = vfio_platform_call_reset(vdev, &extra_dbg);
 279                if (ret && vdev->reset_required) {
 280                        dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n",
 281                                 ret, extra_dbg ? extra_dbg : "");
 282                        goto err_rst;
 283                }
 284        }
 285
 286        vdev->refcnt++;
 287
 288        mutex_unlock(&driver_lock);
 289        return 0;
 290
 291err_rst:
 292        pm_runtime_put(vdev->device);
 293err_pm:
 294        vfio_platform_irq_cleanup(vdev);
 295err_irq:
 296        vfio_platform_regions_cleanup(vdev);
 297err_reg:
 298        mutex_unlock(&driver_lock);
 299        module_put(THIS_MODULE);
 300        return ret;
 301}
 302
/*
 * vfio device ioctl dispatcher.
 *
 * Implements VFIO_DEVICE_GET_INFO, VFIO_DEVICE_GET_REGION_INFO,
 * VFIO_DEVICE_GET_IRQ_INFO, VFIO_DEVICE_SET_IRQS and VFIO_DEVICE_RESET.
 * Each GET path copies exactly minsz bytes in and out, so a larger
 * caller-provided argsz layout is left untouched beyond that prefix.
 * Returns 0 or a negative errno; -ENOTTY for unknown commands.
 */
static long vfio_platform_ioctl(void *device_data,
                                unsigned int cmd, unsigned long arg)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned long minsz;

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                /* Advertise RESET only when a reset handler is available. */
                if (vfio_platform_has_reset(vdev))
                        vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
                info.flags = vdev->flags;
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_regions)
                        return -EINVAL;

                /* map offset to the physical address  */
                info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_irqs)
                        return -EINVAL;

                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;
                size_t data_size = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                /* Validates hdr fields and computes the payload size. */
                ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
                                                 vdev->num_irqs, &data_size);
                if (ret)
                        return ret;

                if (data_size) {
                        /* Payload follows the header in the user buffer. */
                        data = memdup_user((void __user *)(arg + minsz),
                                            data_size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                /* igate serializes IRQ configuration changes. */
                mutex_lock(&vdev->igate);

                ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
                                                   hdr.start, hdr.count, data);
                mutex_unlock(&vdev->igate);
                kfree(data);

                return ret;

        } else if (cmd == VFIO_DEVICE_RESET) {
                return vfio_platform_call_reset(vdev, NULL);
        }

        return -ENOTTY;
}
 409
 410static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
 411                                       char __user *buf, size_t count,
 412                                       loff_t off)
 413{
 414        unsigned int done = 0;
 415
 416        if (!reg->ioaddr) {
 417                reg->ioaddr =
 418                        ioremap_nocache(reg->addr, reg->size);
 419
 420                if (!reg->ioaddr)
 421                        return -ENOMEM;
 422        }
 423
 424        while (count) {
 425                size_t filled;
 426
 427                if (count >= 4 && !(off % 4)) {
 428                        u32 val;
 429
 430                        val = ioread32(reg->ioaddr + off);
 431                        if (copy_to_user(buf, &val, 4))
 432                                goto err;
 433
 434                        filled = 4;
 435                } else if (count >= 2 && !(off % 2)) {
 436                        u16 val;
 437
 438                        val = ioread16(reg->ioaddr + off);
 439                        if (copy_to_user(buf, &val, 2))
 440                                goto err;
 441
 442                        filled = 2;
 443                } else {
 444                        u8 val;
 445
 446                        val = ioread8(reg->ioaddr + off);
 447                        if (copy_to_user(buf, &val, 1))
 448                                goto err;
 449
 450                        filled = 1;
 451                }
 452
 453
 454                count -= filled;
 455                done += filled;
 456                off += filled;
 457                buf += filled;
 458        }
 459
 460        return done;
 461err:
 462        return -EFAULT;
 463}
 464
 465static ssize_t vfio_platform_read(void *device_data, char __user *buf,
 466                                  size_t count, loff_t *ppos)
 467{
 468        struct vfio_platform_device *vdev = device_data;
 469        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
 470        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
 471
 472        if (index >= vdev->num_regions)
 473                return -EINVAL;
 474
 475        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
 476                return -EINVAL;
 477
 478        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
 479                return vfio_platform_read_mmio(&vdev->regions[index],
 480                                                        buf, count, off);
 481        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
 482                return -EINVAL; /* not implemented */
 483
 484        return -EINVAL;
 485}
 486
 487static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
 488                                        const char __user *buf, size_t count,
 489                                        loff_t off)
 490{
 491        unsigned int done = 0;
 492
 493        if (!reg->ioaddr) {
 494                reg->ioaddr =
 495                        ioremap_nocache(reg->addr, reg->size);
 496
 497                if (!reg->ioaddr)
 498                        return -ENOMEM;
 499        }
 500
 501        while (count) {
 502                size_t filled;
 503
 504                if (count >= 4 && !(off % 4)) {
 505                        u32 val;
 506
 507                        if (copy_from_user(&val, buf, 4))
 508                                goto err;
 509                        iowrite32(val, reg->ioaddr + off);
 510
 511                        filled = 4;
 512                } else if (count >= 2 && !(off % 2)) {
 513                        u16 val;
 514
 515                        if (copy_from_user(&val, buf, 2))
 516                                goto err;
 517                        iowrite16(val, reg->ioaddr + off);
 518
 519                        filled = 2;
 520                } else {
 521                        u8 val;
 522
 523                        if (copy_from_user(&val, buf, 1))
 524                                goto err;
 525                        iowrite8(val, reg->ioaddr + off);
 526
 527                        filled = 1;
 528                }
 529
 530                count -= filled;
 531                done += filled;
 532                off += filled;
 533                buf += filled;
 534        }
 535
 536        return done;
 537err:
 538        return -EFAULT;
 539}
 540
 541static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
 542                                   size_t count, loff_t *ppos)
 543{
 544        struct vfio_platform_device *vdev = device_data;
 545        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
 546        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
 547
 548        if (index >= vdev->num_regions)
 549                return -EINVAL;
 550
 551        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
 552                return -EINVAL;
 553
 554        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
 555                return vfio_platform_write_mmio(&vdev->regions[index],
 556                                                        buf, count, off);
 557        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
 558                return -EINVAL; /* not implemented */
 559
 560        return -EINVAL;
 561}
 562
 563static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
 564                                   struct vm_area_struct *vma)
 565{
 566        u64 req_len, pgoff, req_start;
 567
 568        req_len = vma->vm_end - vma->vm_start;
 569        pgoff = vma->vm_pgoff &
 570                ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
 571        req_start = pgoff << PAGE_SHIFT;
 572
 573        if (region.size < PAGE_SIZE || req_start + req_len > region.size)
 574                return -EINVAL;
 575
 576        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 577        vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
 578
 579        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 580                               req_len, vma->vm_page_prot);
 581}
 582
 583static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
 584{
 585        struct vfio_platform_device *vdev = device_data;
 586        unsigned int index;
 587
 588        index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);
 589
 590        if (vma->vm_end < vma->vm_start)
 591                return -EINVAL;
 592        if (!(vma->vm_flags & VM_SHARED))
 593                return -EINVAL;
 594        if (index >= vdev->num_regions)
 595                return -EINVAL;
 596        if (vma->vm_start & ~PAGE_MASK)
 597                return -EINVAL;
 598        if (vma->vm_end & ~PAGE_MASK)
 599                return -EINVAL;
 600
 601        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
 602                return -EINVAL;
 603
 604        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
 605                        && (vma->vm_flags & VM_READ))
 606                return -EINVAL;
 607
 608        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
 609                        && (vma->vm_flags & VM_WRITE))
 610                return -EINVAL;
 611
 612        vma->vm_private_data = vdev;
 613
 614        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
 615                return vfio_platform_mmap_mmio(vdev->regions[index], vma);
 616
 617        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
 618                return -EINVAL; /* not implemented */
 619
 620        return -EINVAL;
 621}
 622
/* Device file operations handed to vfio core for each platform device. */
static const struct vfio_device_ops vfio_platform_ops = {
        .name           = "vfio-platform",
        .open           = vfio_platform_open,
        .release        = vfio_platform_release,
        .ioctl          = vfio_platform_ioctl,
        .read           = vfio_platform_read,
        .write          = vfio_platform_write,
        .mmap           = vfio_platform_mmap,
};
 632
 633static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
 634                           struct device *dev)
 635{
 636        int ret;
 637
 638        ret = device_property_read_string(dev, "compatible",
 639                                          &vdev->compat);
 640        if (ret)
 641                pr_err("VFIO: Cannot retrieve compat for %s\n", vdev->name);
 642
 643        return ret;
 644}
 645
/*
 * There are two possible kernel build combinations: one where ACPI is
 * not selected in Kconfig, and one where it is.
 *
 * In the first case, vfio_platform_acpi_probe returns immediately,
 * since acpi_disabled is 1. A DT user will not see any messages from
 * ACPI.
 *
 * In the second case, both DT and ACPI support are compiled in, and the
 * system boots with one of the two firmware types.
 *
 * If the firmware is DT type, then acpi_disabled is 1. The ACPI probe
 * routine terminates immediately without any messages.
 *
 * If the firmware is ACPI type, then acpi_disabled is 0. All the other
 * checks are valid, and we cannot claim that this system is DT.
 */
/*
 * Common probe path shared by the vfio platform/AMBA drivers.
 *
 * Tries ACPI probing first (fails fast with -ENOENT when ACPI is
 * disabled) and falls back to reading the DT "compatible" property.
 * Then resolves a reset handler (mandatory only when reset_required is
 * set), joins the device's IOMMU group and registers with vfio core.
 * Returns 0 on success or a negative errno, with all acquired
 * references released on failure.
 */
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
                               struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!vdev)
                return -EINVAL;

        ret = vfio_platform_acpi_probe(vdev, dev);
        if (ret)
                ret = vfio_platform_of_probe(vdev, dev);

        if (ret)
                return ret;

        vdev->device = dev;

        ret = vfio_platform_get_reset(vdev);
        if (ret && vdev->reset_required) {
                /* Missing reset is fatal only when the driver demands one. */
                pr_err("VFIO: No reset function found for device %s\n",
                       vdev->name);
                return ret;
        }

        group = vfio_iommu_group_get(dev);
        if (!group) {
                pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
                ret = -EINVAL;
                goto put_reset;
        }

        ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
        if (ret)
                goto put_iommu;

        mutex_init(&vdev->igate);

        pm_runtime_enable(vdev->device);
        return 0;

put_iommu:
        vfio_iommu_group_put(group, dev);
put_reset:
        vfio_platform_put_reset(vdev);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
 711
 712struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
 713{
 714        struct vfio_platform_device *vdev;
 715
 716        vdev = vfio_del_group_dev(dev);
 717
 718        if (vdev) {
 719                pm_runtime_disable(vdev->device);
 720                vfio_platform_put_reset(vdev);
 721                vfio_iommu_group_put(dev->iommu_group, dev);
 722        }
 723
 724        return vdev;
 725}
 726EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
 727
 728void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
 729{
 730        mutex_lock(&driver_lock);
 731        list_add(&node->link, &reset_list);
 732        mutex_unlock(&driver_lock);
 733}
 734EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);
 735
 736void vfio_platform_unregister_reset(const char *compat,
 737                                    vfio_platform_reset_fn_t fn)
 738{
 739        struct vfio_platform_reset_node *iter, *temp;
 740
 741        mutex_lock(&driver_lock);
 742        list_for_each_entry_safe(iter, temp, &reset_list, link) {
 743                if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
 744                        list_del(&iter->link);
 745                        break;
 746                }
 747        }
 748
 749        mutex_unlock(&driver_lock);
 750
 751}
 752EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);
 753
 754MODULE_VERSION(DRIVER_VERSION);
 755MODULE_LICENSE("GPL v2");
 756MODULE_AUTHOR(DRIVER_AUTHOR);
 757MODULE_DESCRIPTION(DRIVER_DESC);
 758