linux/drivers/vfio/platform/vfio_platform_common.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 */

#define dev_fmt(fmt)    "VFIO: " fmt

#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

#define VFIO_PLATFORM_IS_ACPI(vdev) ((vdev)->acpihid != NULL)

static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);

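/*
 * reset_list holds the compat-specific reset handlers registered by the
 * modules under drivers/vfio/platform/reset/, protected by driver_lock.
 * A successful lookup takes a reference on the owning module
 * (try_module_get()) which is dropped again in vfio_platform_put_reset(),
 * so a reset module cannot be unloaded while a device is still using it.
 */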
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
                                        struct module **module)
{
        struct vfio_platform_reset_node *iter;
        vfio_platform_reset_fn_t reset_fn = NULL;

        mutex_lock(&driver_lock);
        list_for_each_entry(iter, &reset_list, link) {
                if (!strcmp(iter->compat, compat) &&
                        try_module_get(iter->owner)) {
                        *module = iter->owner;
                        reset_fn = iter->of_reset;
                        break;
                }
        }
        mutex_unlock(&driver_lock);
        return reset_fn;
}

static int vfio_platform_acpi_probe(struct vfio_platform_device *vdev,
                                    struct device *dev)
{
        struct acpi_device *adev;

        if (acpi_disabled)
                return -ENOENT;

        adev = ACPI_COMPANION(dev);
        if (!adev) {
                dev_err(dev, "ACPI companion device not found for %s\n",
                        vdev->name);
                return -ENODEV;
        }

#ifdef CONFIG_ACPI
        vdev->acpihid = acpi_device_hid(adev);
#endif
        return WARN_ON(!vdev->acpihid) ? -EINVAL : 0;
}

static int vfio_platform_acpi_call_reset(struct vfio_platform_device *vdev,
                                  const char **extra_dbg)
{
#ifdef CONFIG_ACPI
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct device *dev = vdev->device;
        acpi_handle handle = ACPI_HANDLE(dev);
        acpi_status acpi_ret;

        acpi_ret = acpi_evaluate_object(handle, "_RST", NULL, &buffer);
        if (ACPI_FAILURE(acpi_ret)) {
                if (extra_dbg)
                        *extra_dbg = acpi_format_exception(acpi_ret);
                return -EINVAL;
        }

        return 0;
#else
        return -ENOENT;
#endif
}

static bool vfio_platform_acpi_has_reset(struct vfio_platform_device *vdev)
{
#ifdef CONFIG_ACPI
        struct device *dev = vdev->device;
        acpi_handle handle = ACPI_HANDLE(dev);

        return acpi_has_method(handle, "_RST");
#else
        return false;
#endif
}

static bool vfio_platform_has_reset(struct vfio_platform_device *vdev)
{
        if (VFIO_PLATFORM_IS_ACPI(vdev))
                return vfio_platform_acpi_has_reset(vdev);

        return vdev->of_reset ? true : false;
}

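/*
 * Reset handling comes in two flavours: ACPI devices use the firmware's
 * _RST method if it exists, while OF/platform devices rely on a reset
 * module matched by compatible string.  If no handler is registered yet,
 * request_module("vfio-reset:<compat>") gives a module that declares the
 * corresponding MODULE_ALIAS("vfio-reset:<compat>") a chance to autoload
 * (see the example near __vfio_platform_register_reset() below).
 */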
static int vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
        if (VFIO_PLATFORM_IS_ACPI(vdev))
                return vfio_platform_acpi_has_reset(vdev) ? 0 : -ENOENT;

        vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
                                                    &vdev->reset_module);
        if (!vdev->of_reset) {
                request_module("vfio-reset:%s", vdev->compat);
                vdev->of_reset = vfio_platform_lookup_reset(vdev->compat,
                                                        &vdev->reset_module);
        }

        return vdev->of_reset ? 0 : -ENOENT;
}

static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
        if (VFIO_PLATFORM_IS_ACPI(vdev))
                return;

        if (vdev->of_reset)
                module_put(vdev->reset_module);
}

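/*
 * Region bookkeeping: the bus-specific driver's get_resource() callback is
 * used to enumerate the device's MMIO/PIO resources by index.  MMIO regions
 * are always readable via read(), writable unless IORESOURCE_READONLY, and
 * only advertised as mmap-capable when both their base and size are page
 * aligned, so user space can never map beyond the backing resource.
 */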
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
        int cnt = 0, i;

        while (vdev->get_resource(vdev, cnt))
                cnt++;

        vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
                                GFP_KERNEL);
        if (!vdev->regions)
                return -ENOMEM;

        for (i = 0; i < cnt; i++) {
                struct resource *res =
                        vdev->get_resource(vdev, i);

                if (!res)
                        goto err;

                vdev->regions[i].addr = res->start;
                vdev->regions[i].size = resource_size(res);
                vdev->regions[i].flags = 0;

                switch (resource_type(res)) {
                case IORESOURCE_MEM:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
                        vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
                        if (!(res->flags & IORESOURCE_READONLY))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_WRITE;

                        /*
                         * Only regions addressed with PAGE granularity may be
                         * MMAPed securely.
                         */
                        if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
                                        !(vdev->regions[i].size & ~PAGE_MASK))
                                vdev->regions[i].flags |=
                                        VFIO_REGION_INFO_FLAG_MMAP;

                        break;
                case IORESOURCE_IO:
                        vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
                        break;
                default:
                        goto err;
                }
        }

        vdev->num_regions = cnt;

        return 0;
err:
        kfree(vdev->regions);
        return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
        int i;

        for (i = 0; i < vdev->num_regions; i++)
                iounmap(vdev->regions[i].ioaddr);

        vdev->num_regions = 0;
        kfree(vdev->regions);
}

static int vfio_platform_call_reset(struct vfio_platform_device *vdev,
                                    const char **extra_dbg)
{
        if (VFIO_PLATFORM_IS_ACPI(vdev)) {
                dev_info(vdev->device, "reset\n");
                return vfio_platform_acpi_call_reset(vdev, extra_dbg);
        } else if (vdev->of_reset) {
                dev_info(vdev->device, "reset\n");
                return vdev->of_reset(vdev);
        }

        dev_warn(vdev->device, "no reset function found!\n");
        return -EINVAL;
}

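/*
 * vfio_platform_open()/vfio_platform_release() follow the usual
 * first-open/last-close model: the first open sets up the region and IRQ
 * bookkeeping, resumes the device via runtime PM and, when a reset handler
 * is available, resets it so user space starts from a known state; the last
 * release resets again and tears everything down.  driver_lock serializes
 * the two paths and refcnt counts the live file descriptors.
 */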
static void vfio_platform_release(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;

        mutex_lock(&driver_lock);

        if (!(--vdev->refcnt)) {
                const char *extra_dbg = NULL;
                int ret;

                ret = vfio_platform_call_reset(vdev, &extra_dbg);
                if (ret && vdev->reset_required) {
                        dev_warn(vdev->device, "reset driver is required and reset call failed in release (%d) %s\n",
                                 ret, extra_dbg ? extra_dbg : "");
                        WARN_ON(1);
                }
                pm_runtime_put(vdev->device);
                vfio_platform_regions_cleanup(vdev);
                vfio_platform_irq_cleanup(vdev);
        }

        mutex_unlock(&driver_lock);

        module_put(vdev->parent_module);
}

static int vfio_platform_open(void *device_data)
{
        struct vfio_platform_device *vdev = device_data;
        int ret;

        if (!try_module_get(vdev->parent_module))
                return -ENODEV;

        mutex_lock(&driver_lock);

        if (!vdev->refcnt) {
                const char *extra_dbg = NULL;

                ret = vfio_platform_regions_init(vdev);
                if (ret)
                        goto err_reg;

                ret = vfio_platform_irq_init(vdev);
                if (ret)
                        goto err_irq;

                ret = pm_runtime_get_sync(vdev->device);
                if (ret < 0)
                        goto err_pm;

                ret = vfio_platform_call_reset(vdev, &extra_dbg);
                if (ret && vdev->reset_required) {
                        dev_warn(vdev->device, "reset driver is required and reset call failed in open (%d) %s\n",
                                 ret, extra_dbg ? extra_dbg : "");
                        goto err_rst;
                }
        }

        vdev->refcnt++;

        mutex_unlock(&driver_lock);
        return 0;

err_rst:
        pm_runtime_put(vdev->device);
err_pm:
        vfio_platform_irq_cleanup(vdev);
err_irq:
        vfio_platform_regions_cleanup(vdev);
err_reg:
        mutex_unlock(&driver_lock);
        /* drop the reference taken with try_module_get() above */
        module_put(vdev->parent_module);
        return ret;
}

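/*
 * A rough sketch of how user space typically drives these ioctls once it
 * holds a device fd from VFIO_GROUP_GET_DEVICE_FD (device_fd below stands
 * for such an fd; error handling omitted):
 *
 *	struct vfio_device_info info = { .argsz = sizeof(info) };
 *	struct vfio_irq_set *irq_set;
 *	int efd = eventfd(0, 0);
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_INFO, &info);
 *
 *	irq_set = malloc(sizeof(*irq_set) + sizeof(int));
 *	irq_set->argsz = sizeof(*irq_set) + sizeof(int);
 *	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD |
 *			 VFIO_IRQ_SET_ACTION_TRIGGER;
 *	irq_set->index = 0;
 *	irq_set->start = 0;
 *	irq_set->count = 1;
 *	memcpy(&irq_set->data, &efd, sizeof(int));
 *	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
 *
 * which wires IRQ index 0 to an eventfd the user can then poll() or read().
 */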
static long vfio_platform_ioctl(void *device_data,
                                unsigned int cmd, unsigned long arg)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned long minsz;

        if (cmd == VFIO_DEVICE_GET_INFO) {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (vfio_platform_has_reset(vdev))
                        vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
                info.flags = vdev->flags;
                info.num_regions = vdev->num_regions;
                info.num_irqs = vdev->num_irqs;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
                struct vfio_region_info info;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_regions)
                        return -EINVAL;

                /* map offset to the physical address */
                info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
                info.size = vdev->regions[info.index].size;
                info.flags = vdev->regions[info.index].flags;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                if (info.index >= vdev->num_irqs)
                        return -EINVAL;

                info.flags = vdev->irqs[info.index].flags;
                info.count = vdev->irqs[info.index].count;

                return copy_to_user((void __user *)arg, &info, minsz) ?
                        -EFAULT : 0;

        } else if (cmd == VFIO_DEVICE_SET_IRQS) {
                struct vfio_irq_set hdr;
                u8 *data = NULL;
                int ret = 0;
                size_t data_size = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr, vdev->num_irqs,
                                                 vdev->num_irqs, &data_size);
                if (ret)
                        return ret;

                if (data_size) {
                        data = memdup_user((void __user *)(arg + minsz),
                                            data_size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                mutex_lock(&vdev->igate);

                ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
                                                   hdr.start, hdr.count, data);
                mutex_unlock(&vdev->igate);
                kfree(data);

                return ret;

        } else if (cmd == VFIO_DEVICE_RESET) {
                return vfio_platform_call_reset(vdev, NULL);
        }

        return -ENOTTY;
}

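/*
 * read()/write() on the device fd use the same offset encoding that
 * VFIO_DEVICE_GET_REGION_INFO reports: the region index sits in the upper
 * bits of the file offset (VFIO_PLATFORM_OFFSET_SHIFT, see
 * vfio_platform_private.h) and the low bits are the offset into the region.
 * MMIO mappings are created lazily on first access and transfers are split
 * into the widest naturally aligned 4/2/1 byte chunks.  From user space a
 * register read is then simply (device_fd and reg_offset are placeholders):
 *
 *	struct vfio_region_info info = { .argsz = sizeof(info), .index = 0 };
 *	u32 val;
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *	pread(device_fd, &val, sizeof(val), info.offset + reg_offset);
 */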
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
                                       char __user *buf, size_t count,
                                       loff_t off)
{
        unsigned int done = 0;

        if (!reg->ioaddr) {
                reg->ioaddr =
                        ioremap_nocache(reg->addr, reg->size);

                if (!reg->ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val;

                        val = ioread32(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 4))
                                goto err;

                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val;

                        val = ioread16(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 2))
                                goto err;

                        filled = 2;
                } else {
                        u8 val;

                        val = ioread8(reg->ioaddr + off);
                        if (copy_to_user(buf, &val, 1))
                                goto err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}

static ssize_t vfio_platform_read(void *device_data, char __user *buf,
                                  size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_read_mmio(&vdev->regions[index],
                                                        buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
                                        const char __user *buf, size_t count,
                                        loff_t off)
{
        unsigned int done = 0;

        if (!reg->ioaddr) {
                reg->ioaddr =
                        ioremap_nocache(reg->addr, reg->size);

                if (!reg->ioaddr)
                        return -ENOMEM;
        }

        while (count) {
                size_t filled;

                if (count >= 4 && !(off % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, 4))
                                goto err;
                        iowrite32(val, reg->ioaddr + off);

                        filled = 4;
                } else if (count >= 2 && !(off % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, 2))
                                goto err;
                        iowrite16(val, reg->ioaddr + off);

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, 1))
                                goto err;
                        iowrite8(val, reg->ioaddr + off);

                        filled = 1;
                }

                count -= filled;
                done += filled;
                off += filled;
                buf += filled;
        }

        return done;
err:
        return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
                                   size_t count, loff_t *ppos)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
        loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

        if (index >= vdev->num_regions)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
                return -EINVAL;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_write_mmio(&vdev->regions[index],
                                                        buf, count, off);
        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

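/*
 * mmap() reuses the index-in-the-upper-bits encoding: the region index is
 * taken from the top of vma->vm_pgoff and the remaining page offset is added
 * to the region's physical base before the range is handed to
 * remap_pfn_range() as an uncached mapping.  Only regions advertised with
 * VFIO_REGION_INFO_FLAG_MMAP (page-aligned MMIO resources) are accepted.
 */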
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
                                   struct vm_area_struct *vma)
{
        u64 req_len, pgoff, req_start;

        req_len = vma->vm_end - vma->vm_start;
        pgoff = vma->vm_pgoff &
                ((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
        req_start = pgoff << PAGE_SHIFT;

        if (region.size < PAGE_SIZE || req_start + req_len > region.size)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

        return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                               req_len, vma->vm_page_prot);
}

static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
        struct vfio_platform_device *vdev = device_data;
        unsigned int index;

        index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;
        if (!(vma->vm_flags & VM_SHARED))
                return -EINVAL;
        if (index >= vdev->num_regions)
                return -EINVAL;
        if (vma->vm_start & ~PAGE_MASK)
                return -EINVAL;
        if (vma->vm_end & ~PAGE_MASK)
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
                        && (vma->vm_flags & VM_READ))
                return -EINVAL;

        if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
                        && (vma->vm_flags & VM_WRITE))
                return -EINVAL;

        vma->vm_private_data = vdev;

        if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
                return vfio_platform_mmap_mmio(vdev->regions[index], vma);

        else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
                return -EINVAL; /* not implemented */

        return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
        .name           = "vfio-platform",
        .open           = vfio_platform_open,
        .release        = vfio_platform_release,
        .ioctl          = vfio_platform_ioctl,
        .read           = vfio_platform_read,
        .write          = vfio_platform_write,
        .mmap           = vfio_platform_mmap,
};

static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
                           struct device *dev)
{
        int ret;

        ret = device_property_read_string(dev, "compatible",
                                          &vdev->compat);
        if (ret)
                dev_err(dev, "Cannot retrieve compat for %s\n", vdev->name);

        return ret;
}

/*
 * There are two possible kernel build combinations: one where ACPI is not
 * selected in Kconfig, and one where it is.
 *
 * In the first case, vfio_platform_acpi_probe returns immediately because
 * acpi_disabled is 1, so DT users see no messages from the ACPI path.
 *
 * In the second case, both DT and ACPI support are compiled in and the
 * system may boot with either type of firmware.
 *
 * If the firmware is DT, acpi_disabled is 1 and the ACPI probe routine
 * again terminates immediately without any messages.
 *
 * If the firmware is ACPI, acpi_disabled is 0 and the remaining ACPI checks
 * apply; in that case we cannot claim the system is DT.
 */
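/*
 * A bus-specific driver (vfio_platform.c for the platform bus, vfio_amba.c
 * for AMBA devices) allocates the vfio_platform_device, fills in the
 * callbacks and then calls vfio_platform_probe_common().  A rough sketch,
 * loosely modelled on the platform bus driver (error handling trimmed;
 * get_platform_resource/get_platform_irq stand for the caller's own
 * resource lookup helpers):
 *
 *	static int foo_vfio_platform_probe(struct platform_device *pdev)
 *	{
 *		struct vfio_platform_device *vdev;
 *		int ret;
 *
 *		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
 *		if (!vdev)
 *			return -ENOMEM;
 *
 *		vdev->opaque = (void *) pdev;
 *		vdev->name = pdev->name;
 *		vdev->flags = VFIO_DEVICE_FLAGS_PLATFORM;
 *		vdev->get_resource = get_platform_resource;
 *		vdev->get_irq = get_platform_irq;
 *		vdev->parent_module = THIS_MODULE;
 *		vdev->reset_required = true;
 *
 *		ret = vfio_platform_probe_common(vdev, &pdev->dev);
 *		if (ret)
 *			kfree(vdev);
 *		return ret;
 *	}
 */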
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
                               struct device *dev)
{
        struct iommu_group *group;
        int ret;

        if (!vdev)
                return -EINVAL;

        ret = vfio_platform_acpi_probe(vdev, dev);
        if (ret)
                ret = vfio_platform_of_probe(vdev, dev);

        if (ret)
                return ret;

        vdev->device = dev;

        ret = vfio_platform_get_reset(vdev);
        if (ret && vdev->reset_required) {
                dev_err(dev, "No reset function found for device %s\n",
                        vdev->name);
                return ret;
        }

        group = vfio_iommu_group_get(dev);
        if (!group) {
                dev_err(dev, "No IOMMU group for device %s\n", vdev->name);
                ret = -EINVAL;
                goto put_reset;
        }

        ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
        if (ret)
                goto put_iommu;

        mutex_init(&vdev->igate);

        pm_runtime_enable(vdev->device);
        return 0;

put_iommu:
        vfio_iommu_group_put(group, dev);
put_reset:
        vfio_platform_put_reset(vdev);
        return ret;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
        struct vfio_platform_device *vdev;

        vdev = vfio_del_group_dev(dev);

        if (vdev) {
                pm_runtime_disable(vdev->device);
                vfio_platform_put_reset(vdev);
                vfio_iommu_group_put(dev->iommu_group, dev);
        }

        return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);

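/*
 * Compat-specific reset modules register themselves on reset_list through
 * the vfio_platform_register_reset()/module_vfio_reset_handler() helpers in
 * vfio_platform_private.h.  A minimal sketch of such a module, modelled on
 * the in-tree reset drivers (the "vendor,foo" compatible and the reset
 * sequence itself are purely illustrative):
 *
 *	static int vfio_platform_foo_reset(struct vfio_platform_device *vdev)
 *	{
 *		struct vfio_platform_region *reg = &vdev->regions[0];
 *
 *		if (!reg->ioaddr) {
 *			reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
 *			if (!reg->ioaddr)
 *				return -ENOMEM;
 *		}
 *
 *		writel(1, reg->ioaddr);
 *		return 0;
 *	}
 *	module_vfio_reset_handler("vendor,foo", vfio_platform_foo_reset);
 *
 * module_vfio_reset_handler() also emits MODULE_ALIAS("vfio-reset:vendor,foo"),
 * which is what lets request_module() in vfio_platform_get_reset() autoload
 * the module on demand; the writel() above stands in for whatever register
 * sequence the real hardware needs.
 */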
void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
        mutex_lock(&driver_lock);
        list_add(&node->link, &reset_list);
        mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);

void vfio_platform_unregister_reset(const char *compat,
                                    vfio_platform_reset_fn_t fn)
{
        struct vfio_platform_reset_node *iter, *temp;

        mutex_lock(&driver_lock);
        list_for_each_entry_safe(iter, temp, &reset_list, link) {
                if (!strcmp(iter->compat, compat) && (iter->of_reset == fn)) {
                        list_del(&iter->link);
                        break;
                }
        }

        mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);