linux/drivers/iommu/iommu.c
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
   4 * Author: Joerg Roedel <jroedel@suse.de>
   5 */
   6
   7#define pr_fmt(fmt)    "iommu: " fmt
   8
   9#include <linux/device.h>
  10#include <linux/dma-iommu.h>
  11#include <linux/kernel.h>
  12#include <linux/bits.h>
  13#include <linux/bug.h>
  14#include <linux/types.h>
  15#include <linux/init.h>
  16#include <linux/export.h>
  17#include <linux/slab.h>
  18#include <linux/errno.h>
  19#include <linux/iommu.h>
  20#include <linux/idr.h>
  21#include <linux/notifier.h>
  22#include <linux/err.h>
  23#include <linux/pci.h>
  24#include <linux/bitops.h>
  25#include <linux/property.h>
  26#include <linux/fsl/mc.h>
  27#include <linux/module.h>
  28#include <trace/events/iommu.h>
  29
  30static struct kset *iommu_group_kset;
  31static DEFINE_IDA(iommu_group_ida);
  32
  33static unsigned int iommu_def_domain_type __read_mostly;
  34static bool iommu_dma_strict __read_mostly = IS_ENABLED(CONFIG_IOMMU_DEFAULT_DMA_STRICT);
  35static u32 iommu_cmd_line __read_mostly;
  36
  37struct iommu_group {
  38        struct kobject kobj;
  39        struct kobject *devices_kobj;
  40        struct list_head devices;
  41        struct mutex mutex;
  42        struct blocking_notifier_head notifier;
  43        void *iommu_data;
  44        void (*iommu_data_release)(void *iommu_data);
  45        char *name;
  46        int id;
  47        struct iommu_domain *default_domain;
  48        struct iommu_domain *domain;
  49        struct list_head entry;
  50};
  51
  52struct group_device {
  53        struct list_head list;
  54        struct device *dev;
  55        char *name;
  56};
  57
  58struct iommu_group_attribute {
  59        struct attribute attr;
  60        ssize_t (*show)(struct iommu_group *group, char *buf);
  61        ssize_t (*store)(struct iommu_group *group,
  62                         const char *buf, size_t count);
  63};
  64
  65static const char * const iommu_group_resv_type_string[] = {
  66        [IOMMU_RESV_DIRECT]                     = "direct",
  67        [IOMMU_RESV_DIRECT_RELAXABLE]           = "direct-relaxable",
  68        [IOMMU_RESV_RESERVED]                   = "reserved",
  69        [IOMMU_RESV_MSI]                        = "msi",
  70        [IOMMU_RESV_SW_MSI]                     = "msi",
  71};
  72
  73#define IOMMU_CMD_LINE_DMA_API          BIT(0)
  74#define IOMMU_CMD_LINE_STRICT           BIT(1)
  75
  76static int iommu_alloc_default_domain(struct iommu_group *group,
  77                                      struct device *dev);
  78static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
  79                                                 unsigned type);
  80static int __iommu_attach_device(struct iommu_domain *domain,
  81                                 struct device *dev);
  82static int __iommu_attach_group(struct iommu_domain *domain,
  83                                struct iommu_group *group);
  84static void __iommu_detach_group(struct iommu_domain *domain,
  85                                 struct iommu_group *group);
  86static int iommu_create_device_direct_mappings(struct iommu_group *group,
  87                                               struct device *dev);
  88static struct iommu_group *iommu_group_get_for_dev(struct device *dev);
  89static ssize_t iommu_group_store_type(struct iommu_group *group,
  90                                      const char *buf, size_t count);
  91
  92#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
  93struct iommu_group_attribute iommu_group_attr_##_name =         \
  94        __ATTR(_name, _mode, _show, _store)
  95
  96#define to_iommu_group_attr(_attr)      \
  97        container_of(_attr, struct iommu_group_attribute, attr)
  98#define to_iommu_group(_kobj)           \
  99        container_of(_kobj, struct iommu_group, kobj)
 100
 101static LIST_HEAD(iommu_device_list);
 102static DEFINE_SPINLOCK(iommu_device_lock);
 103
 104/*
 105 * Use a function instead of an array here because the domain-type is a
 106 * bit-field, so an array would waste memory.
 107 */
 108static const char *iommu_domain_type_str(unsigned int t)
 109{
 110        switch (t) {
 111        case IOMMU_DOMAIN_BLOCKED:
 112                return "Blocked";
 113        case IOMMU_DOMAIN_IDENTITY:
 114                return "Passthrough";
 115        case IOMMU_DOMAIN_UNMANAGED:
 116                return "Unmanaged";
 117        case IOMMU_DOMAIN_DMA:
 118        case IOMMU_DOMAIN_DMA_FQ:
 119                return "Translated";
 120        default:
 121                return "Unknown";
 122        }
 123}
 124
 125static int __init iommu_subsys_init(void)
 126{
 127        if (!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API)) {
 128                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
 129                        iommu_set_default_passthrough(false);
 130                else
 131                        iommu_set_default_translated(false);
 132
 133                if (iommu_default_passthrough() && mem_encrypt_active()) {
 134                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
 135                        iommu_set_default_translated(false);
 136                }
 137        }
 138
 139        if (!iommu_default_passthrough() && !iommu_dma_strict)
 140                iommu_def_domain_type = IOMMU_DOMAIN_DMA_FQ;
 141
 142        pr_info("Default domain type: %s %s\n",
 143                iommu_domain_type_str(iommu_def_domain_type),
 144                (iommu_cmd_line & IOMMU_CMD_LINE_DMA_API) ?
 145                        "(set via kernel command line)" : "");
 146
 147        if (!iommu_default_passthrough())
 148                pr_info("DMA domain TLB invalidation policy: %s mode %s\n",
 149                        iommu_dma_strict ? "strict" : "lazy",
 150                        (iommu_cmd_line & IOMMU_CMD_LINE_STRICT) ?
 151                                "(set via kernel command line)" : "");
 152
 153        return 0;
 154}
 155subsys_initcall(iommu_subsys_init);
 156
 157/**
 158 * iommu_device_register() - Register an IOMMU hardware instance
 159 * @iommu: IOMMU handle for the instance
 160 * @ops:   IOMMU ops to associate with the instance
 161 * @hwdev: (optional) actual instance device, used for fwnode lookup
 162 *
 163 * Return: 0 on success, or an error.
 164 */
 165int iommu_device_register(struct iommu_device *iommu,
 166                          const struct iommu_ops *ops, struct device *hwdev)
 167{
 168        /* We need to be able to take module references appropriately */
 169        if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
 170                return -EINVAL;
 171
 172        iommu->ops = ops;
 173        if (hwdev)
 174                iommu->fwnode = hwdev->fwnode;
 175
 176        spin_lock(&iommu_device_lock);
 177        list_add_tail(&iommu->list, &iommu_device_list);
 178        spin_unlock(&iommu_device_lock);
 179        return 0;
 180}
 181EXPORT_SYMBOL_GPL(iommu_device_register);
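
/*
 * Illustrative sketch (not part of this file): how a hypothetical IOMMU
 * driver might register its hardware instance from its own probe path.
 * "struct my_iommu" and "my_iommu_ops" are assumed driver-side definitions,
 * not kernel APIs.
 */
#if 0
struct my_iommu {
	struct iommu_device iommu;
	/* hardware-specific state would live here */
};

static int my_iommu_register(struct my_iommu *mydev, struct device *hwdev)
{
	int ret;

	/* Optionally expose the instance under /sys/class/iommu first. */
	ret = iommu_device_sysfs_add(&mydev->iommu, hwdev, NULL, "%s",
				     dev_name(hwdev));
	if (ret)
		return ret;

	/* Associate the ops and fwnode so endpoint devices can be matched. */
	return iommu_device_register(&mydev->iommu, &my_iommu_ops, hwdev);
}
#endif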
 182
 183void iommu_device_unregister(struct iommu_device *iommu)
 184{
 185        spin_lock(&iommu_device_lock);
 186        list_del(&iommu->list);
 187        spin_unlock(&iommu_device_lock);
 188}
 189EXPORT_SYMBOL_GPL(iommu_device_unregister);
 190
 191static struct dev_iommu *dev_iommu_get(struct device *dev)
 192{
 193        struct dev_iommu *param = dev->iommu;
 194
 195        if (param)
 196                return param;
 197
 198        param = kzalloc(sizeof(*param), GFP_KERNEL);
 199        if (!param)
 200                return NULL;
 201
 202        mutex_init(&param->lock);
 203        dev->iommu = param;
 204        return param;
 205}
 206
 207static void dev_iommu_free(struct device *dev)
 208{
 209        iommu_fwspec_free(dev);
 210        kfree(dev->iommu);
 211        dev->iommu = NULL;
 212}
 213
 214static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
 215{
 216        const struct iommu_ops *ops = dev->bus->iommu_ops;
 217        struct iommu_device *iommu_dev;
 218        struct iommu_group *group;
 219        int ret;
 220
 221        if (!ops)
 222                return -ENODEV;
 223
 224        if (!dev_iommu_get(dev))
 225                return -ENOMEM;
 226
 227        if (!try_module_get(ops->owner)) {
 228                ret = -EINVAL;
 229                goto err_free;
 230        }
 231
 232        iommu_dev = ops->probe_device(dev);
 233        if (IS_ERR(iommu_dev)) {
 234                ret = PTR_ERR(iommu_dev);
 235                goto out_module_put;
 236        }
 237
 238        dev->iommu->iommu_dev = iommu_dev;
 239
 240        group = iommu_group_get_for_dev(dev);
 241        if (IS_ERR(group)) {
 242                ret = PTR_ERR(group);
 243                goto out_release;
 244        }
 245        iommu_group_put(group);
 246
 247        if (group_list && !group->default_domain && list_empty(&group->entry))
 248                list_add_tail(&group->entry, group_list);
 249
 250        iommu_device_link(iommu_dev, dev);
 251
 252        return 0;
 253
 254out_release:
 255        ops->release_device(dev);
 256
 257out_module_put:
 258        module_put(ops->owner);
 259
 260err_free:
 261        dev_iommu_free(dev);
 262
 263        return ret;
 264}
 265
 266int iommu_probe_device(struct device *dev)
 267{
 268        const struct iommu_ops *ops = dev->bus->iommu_ops;
 269        struct iommu_group *group;
 270        int ret;
 271
 272        ret = __iommu_probe_device(dev, NULL);
 273        if (ret)
 274                goto err_out;
 275
 276        group = iommu_group_get(dev);
 277        if (!group) {
 278                ret = -ENODEV;
 279                goto err_release;
 280        }
 281
 282        /*
 283         * Try to allocate a default domain - needs support from the
 284         * IOMMU driver. There are still some drivers which don't
 285         * support default domains, so the return value is not yet
 286         * checked.
 287         */
 288        mutex_lock(&group->mutex);
 289        iommu_alloc_default_domain(group, dev);
 290        mutex_unlock(&group->mutex);
 291
 292        if (group->default_domain) {
 293                ret = __iommu_attach_device(group->default_domain, dev);
 294                if (ret) {
 295                        iommu_group_put(group);
 296                        goto err_release;
 297                }
 298        }
 299
 300        iommu_create_device_direct_mappings(group, dev);
 301
 302        iommu_group_put(group);
 303
 304        if (ops->probe_finalize)
 305                ops->probe_finalize(dev);
 306
 307        return 0;
 308
 309err_release:
 310        iommu_release_device(dev);
 311
 312err_out:
 313        return ret;
 314
 315}
 316
 317void iommu_release_device(struct device *dev)
 318{
 319        const struct iommu_ops *ops = dev->bus->iommu_ops;
 320
 321        if (!dev->iommu)
 322                return;
 323
 324        iommu_device_unlink(dev->iommu->iommu_dev, dev);
 325
 326        ops->release_device(dev);
 327
 328        iommu_group_remove_device(dev);
 329        module_put(ops->owner);
 330        dev_iommu_free(dev);
 331}
 332
 333static int __init iommu_set_def_domain_type(char *str)
 334{
 335        bool pt;
 336        int ret;
 337
 338        ret = kstrtobool(str, &pt);
 339        if (ret)
 340                return ret;
 341
 342        if (pt)
 343                iommu_set_default_passthrough(true);
 344        else
 345                iommu_set_default_translated(true);
 346
 347        return 0;
 348}
 349early_param("iommu.passthrough", iommu_set_def_domain_type);
 350
 351static int __init iommu_dma_setup(char *str)
 352{
 353        int ret = kstrtobool(str, &iommu_dma_strict);
 354
 355        if (!ret)
 356                iommu_cmd_line |= IOMMU_CMD_LINE_STRICT;
 357        return ret;
 358}
 359early_param("iommu.strict", iommu_dma_setup);
 360
 361void iommu_set_dma_strict(void)
 362{
 363        iommu_dma_strict = true;
 364        if (iommu_def_domain_type == IOMMU_DOMAIN_DMA_FQ)
 365                iommu_def_domain_type = IOMMU_DOMAIN_DMA;
 366}
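
/*
 * Example (illustrative): booting with "iommu.passthrough=0 iommu.strict=0"
 * keeps the default domain type translated and selects lazy (flush-queue)
 * TLB invalidation, i.e. IOMMU_DOMAIN_DMA_FQ, unless a driver later calls
 * iommu_set_dma_strict() and forces it back to strict IOMMU_DOMAIN_DMA.
 */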
 367
 368static ssize_t iommu_group_attr_show(struct kobject *kobj,
 369                                     struct attribute *__attr, char *buf)
 370{
 371        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
 372        struct iommu_group *group = to_iommu_group(kobj);
 373        ssize_t ret = -EIO;
 374
 375        if (attr->show)
 376                ret = attr->show(group, buf);
 377        return ret;
 378}
 379
 380static ssize_t iommu_group_attr_store(struct kobject *kobj,
 381                                      struct attribute *__attr,
 382                                      const char *buf, size_t count)
 383{
 384        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
 385        struct iommu_group *group = to_iommu_group(kobj);
 386        ssize_t ret = -EIO;
 387
 388        if (attr->store)
 389                ret = attr->store(group, buf, count);
 390        return ret;
 391}
 392
 393static const struct sysfs_ops iommu_group_sysfs_ops = {
 394        .show = iommu_group_attr_show,
 395        .store = iommu_group_attr_store,
 396};
 397
 398static int iommu_group_create_file(struct iommu_group *group,
 399                                   struct iommu_group_attribute *attr)
 400{
 401        return sysfs_create_file(&group->kobj, &attr->attr);
 402}
 403
 404static void iommu_group_remove_file(struct iommu_group *group,
 405                                    struct iommu_group_attribute *attr)
 406{
 407        sysfs_remove_file(&group->kobj, &attr->attr);
 408}
 409
 410static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 411{
 412        return sprintf(buf, "%s\n", group->name);
 413}
 414
 415/**
 416 * iommu_insert_resv_region - Insert a new region in the
 417 * list of reserved regions.
 418 * @new: new region to insert
 419 * @regions: list of regions
 420 *
 421 * Elements are sorted by start address and overlapping segments
 422 * of the same type are merged.
 423 */
 424static int iommu_insert_resv_region(struct iommu_resv_region *new,
 425                                    struct list_head *regions)
 426{
 427        struct iommu_resv_region *iter, *tmp, *nr, *top;
 428        LIST_HEAD(stack);
 429
 430        nr = iommu_alloc_resv_region(new->start, new->length,
 431                                     new->prot, new->type);
 432        if (!nr)
 433                return -ENOMEM;
 434
 435        /* First add the new element based on start address sorting */
 436        list_for_each_entry(iter, regions, list) {
 437                if (nr->start < iter->start ||
 438                    (nr->start == iter->start && nr->type <= iter->type))
 439                        break;
 440        }
 441        list_add_tail(&nr->list, &iter->list);
 442
 443        /* Merge overlapping segments of type nr->type in @regions, if any */
 444        list_for_each_entry_safe(iter, tmp, regions, list) {
 445                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;
 446
 447                /* no merge needed on elements of different types than @new */
 448                if (iter->type != new->type) {
 449                        list_move_tail(&iter->list, &stack);
 450                        continue;
 451                }
 452
 453                /* look for the last stack element of same type as @iter */
 454                list_for_each_entry_reverse(top, &stack, list)
 455                        if (top->type == iter->type)
 456                                goto check_overlap;
 457
 458                list_move_tail(&iter->list, &stack);
 459                continue;
 460
 461check_overlap:
 462                top_end = top->start + top->length - 1;
 463
 464                if (iter->start > top_end + 1) {
 465                        list_move_tail(&iter->list, &stack);
 466                } else {
 467                        top->length = max(top_end, iter_end) - top->start + 1;
 468                        list_del(&iter->list);
 469                        kfree(iter);
 470                }
 471        }
 472        list_splice(&stack, regions);
 473        return 0;
 474}
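
/*
 * Worked example (illustrative): inserting three regions in any order,
 *   [0x08000000, len 0x10000, DIRECT], [0x08008000, len 0x10000, DIRECT]
 *   and [0xfee00000, len 0x1000, MSI],
 * yields a list sorted by start address in which the two overlapping DIRECT
 * regions have been merged:
 *   [0x08000000 - 0x08017fff, DIRECT], [0xfee00000 - 0xfee00fff, MSI].
 */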
 475
 476static int
 477iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
 478                                 struct list_head *group_resv_regions)
 479{
 480        struct iommu_resv_region *entry;
 481        int ret = 0;
 482
 483        list_for_each_entry(entry, dev_resv_regions, list) {
 484                ret = iommu_insert_resv_region(entry, group_resv_regions);
 485                if (ret)
 486                        break;
 487        }
 488        return ret;
 489}
 490
 491int iommu_get_group_resv_regions(struct iommu_group *group,
 492                                 struct list_head *head)
 493{
 494        struct group_device *device;
 495        int ret = 0;
 496
 497        mutex_lock(&group->mutex);
 498        list_for_each_entry(device, &group->devices, list) {
 499                struct list_head dev_resv_regions;
 500
 501                INIT_LIST_HEAD(&dev_resv_regions);
 502                iommu_get_resv_regions(device->dev, &dev_resv_regions);
 503                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
 504                iommu_put_resv_regions(device->dev, &dev_resv_regions);
 505                if (ret)
 506                        break;
 507        }
 508        mutex_unlock(&group->mutex);
 509        return ret;
 510}
 511EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
 512
 513static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
 514                                             char *buf)
 515{
 516        struct iommu_resv_region *region, *next;
 517        struct list_head group_resv_regions;
 518        char *str = buf;
 519
 520        INIT_LIST_HEAD(&group_resv_regions);
 521        iommu_get_group_resv_regions(group, &group_resv_regions);
 522
 523        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
 524                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
 525                               (long long int)region->start,
 526                               (long long int)(region->start +
 527                                                region->length - 1),
 528                               iommu_group_resv_type_string[region->type]);
 529                kfree(region);
 530        }
 531
 532        return (str - buf);
 533}
 534
 535static ssize_t iommu_group_show_type(struct iommu_group *group,
 536                                     char *buf)
 537{
 538        char *type = "unknown\n";
 539
 540        mutex_lock(&group->mutex);
 541        if (group->default_domain) {
 542                switch (group->default_domain->type) {
 543                case IOMMU_DOMAIN_BLOCKED:
 544                        type = "blocked\n";
 545                        break;
 546                case IOMMU_DOMAIN_IDENTITY:
 547                        type = "identity\n";
 548                        break;
 549                case IOMMU_DOMAIN_UNMANAGED:
 550                        type = "unmanaged\n";
 551                        break;
 552                case IOMMU_DOMAIN_DMA:
 553                        type = "DMA\n";
 554                        break;
 555                case IOMMU_DOMAIN_DMA_FQ:
 556                        type = "DMA-FQ\n";
 557                        break;
 558                }
 559        }
 560        mutex_unlock(&group->mutex);
 561        strcpy(buf, type);
 562
 563        return strlen(type);
 564}
 565
 566static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 567
 568static IOMMU_GROUP_ATTR(reserved_regions, 0444,
 569                        iommu_group_show_resv_regions, NULL);
 570
 571static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type,
 572                        iommu_group_store_type);
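
/*
 * Illustrative note: these attributes appear in sysfs under
 * /sys/kernel/iommu_groups/<id>/, e.g. reading
 * /sys/kernel/iommu_groups/0/type returns one of the strings produced by
 * iommu_group_show_type() above, and writes go through
 * iommu_group_store_type().
 */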
 573
 574static void iommu_group_release(struct kobject *kobj)
 575{
 576        struct iommu_group *group = to_iommu_group(kobj);
 577
 578        pr_debug("Releasing group %d\n", group->id);
 579
 580        if (group->iommu_data_release)
 581                group->iommu_data_release(group->iommu_data);
 582
 583        ida_simple_remove(&iommu_group_ida, group->id);
 584
 585        if (group->default_domain)
 586                iommu_domain_free(group->default_domain);
 587
 588        kfree(group->name);
 589        kfree(group);
 590}
 591
 592static struct kobj_type iommu_group_ktype = {
 593        .sysfs_ops = &iommu_group_sysfs_ops,
 594        .release = iommu_group_release,
 595};
 596
 597/**
 598 * iommu_group_alloc - Allocate a new group
 599 *
 600 * This function is called by an iommu driver to allocate a new iommu
 601 * group.  The iommu group represents the minimum granularity of the iommu.
  602 * Upon successful return, the caller holds a reference to the returned
 603 * group in order to hold the group until devices are added.  Use
 604 * iommu_group_put() to release this extra reference count, allowing the
 605 * group to be automatically reclaimed once it has no devices or external
 606 * references.
 607 */
 608struct iommu_group *iommu_group_alloc(void)
 609{
 610        struct iommu_group *group;
 611        int ret;
 612
 613        group = kzalloc(sizeof(*group), GFP_KERNEL);
 614        if (!group)
 615                return ERR_PTR(-ENOMEM);
 616
 617        group->kobj.kset = iommu_group_kset;
 618        mutex_init(&group->mutex);
 619        INIT_LIST_HEAD(&group->devices);
 620        INIT_LIST_HEAD(&group->entry);
 621        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 622
 623        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
 624        if (ret < 0) {
 625                kfree(group);
 626                return ERR_PTR(ret);
 627        }
 628        group->id = ret;
 629
 630        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
 631                                   NULL, "%d", group->id);
 632        if (ret) {
 633                ida_simple_remove(&iommu_group_ida, group->id);
 634                kobject_put(&group->kobj);
 635                return ERR_PTR(ret);
 636        }
 637
 638        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
 639        if (!group->devices_kobj) {
 640                kobject_put(&group->kobj); /* triggers .release & free */
 641                return ERR_PTR(-ENOMEM);
 642        }
 643
 644        /*
 645         * The devices_kobj holds a reference on the group kobject, so
 646         * as long as that exists so will the group.  We can therefore
 647         * use the devices_kobj for reference counting.
 648         */
 649        kobject_put(&group->kobj);
 650
 651        ret = iommu_group_create_file(group,
 652                                      &iommu_group_attr_reserved_regions);
 653        if (ret)
 654                return ERR_PTR(ret);
 655
 656        ret = iommu_group_create_file(group, &iommu_group_attr_type);
 657        if (ret)
 658                return ERR_PTR(ret);
 659
 660        pr_debug("Allocated group %d\n", group->id);
 661
 662        return group;
 663}
 664EXPORT_SYMBOL_GPL(iommu_group_alloc);
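
/*
 * Illustrative sketch (not part of this file): a hypothetical driver path
 * that allocates a fresh group for a device and then drops the initial
 * reference, as described in the comment above.
 */
#if 0
static int my_iommu_add_device_to_new_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_add_device(group, dev);

	/*
	 * Drop the reference returned by iommu_group_alloc(); if the device
	 * was added successfully the group stays alive through the reference
	 * held on its behalf, otherwise it is released here.
	 */
	iommu_group_put(group);

	return ret;
}
#endif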
 665
 666struct iommu_group *iommu_group_get_by_id(int id)
 667{
 668        struct kobject *group_kobj;
 669        struct iommu_group *group;
 670        const char *name;
 671
 672        if (!iommu_group_kset)
 673                return NULL;
 674
 675        name = kasprintf(GFP_KERNEL, "%d", id);
 676        if (!name)
 677                return NULL;
 678
 679        group_kobj = kset_find_obj(iommu_group_kset, name);
 680        kfree(name);
 681
 682        if (!group_kobj)
 683                return NULL;
 684
 685        group = container_of(group_kobj, struct iommu_group, kobj);
 686        BUG_ON(group->id != id);
 687
 688        kobject_get(group->devices_kobj);
 689        kobject_put(&group->kobj);
 690
 691        return group;
 692}
 693EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
 694
 695/**
 696 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 697 * @group: the group
 698 *
 699 * iommu drivers can store data in the group for use when doing iommu
 700 * operations.  This function provides a way to retrieve it.  Caller
 701 * should hold a group reference.
 702 */
 703void *iommu_group_get_iommudata(struct iommu_group *group)
 704{
 705        return group->iommu_data;
 706}
 707EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
 708
 709/**
 710 * iommu_group_set_iommudata - set iommu_data for a group
 711 * @group: the group
 712 * @iommu_data: new data
 713 * @release: release function for iommu_data
 714 *
 715 * iommu drivers can store data in the group for use when doing iommu
 716 * operations.  This function provides a way to set the data after
 717 * the group has been allocated.  Caller should hold a group reference.
 718 */
 719void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
 720                               void (*release)(void *iommu_data))
 721{
 722        group->iommu_data = iommu_data;
 723        group->iommu_data_release = release;
 724}
 725EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
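
/*
 * Illustrative sketch (not part of this file): storing per-group driver data
 * with a release callback.  "struct my_group_data" is a hypothetical
 * driver-side structure.
 */
#if 0
struct my_group_data {
	u32 stream_id;
};

static void my_group_data_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int my_iommu_init_group_data(struct iommu_group *group, u32 sid)
{
	struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	data->stream_id = sid;
	/* Freed via my_group_data_release() when the group is released. */
	iommu_group_set_iommudata(group, data, my_group_data_release);
	return 0;
}
#endif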
 726
 727/**
 728 * iommu_group_set_name - set name for a group
 729 * @group: the group
 730 * @name: name
 731 *
 732 * Allow iommu driver to set a name for a group.  When set it will
 733 * appear in a name attribute file under the group in sysfs.
 734 */
 735int iommu_group_set_name(struct iommu_group *group, const char *name)
 736{
 737        int ret;
 738
 739        if (group->name) {
 740                iommu_group_remove_file(group, &iommu_group_attr_name);
 741                kfree(group->name);
 742                group->name = NULL;
 743                if (!name)
 744                        return 0;
 745        }
 746
 747        group->name = kstrdup(name, GFP_KERNEL);
 748        if (!group->name)
 749                return -ENOMEM;
 750
 751        ret = iommu_group_create_file(group, &iommu_group_attr_name);
 752        if (ret) {
 753                kfree(group->name);
 754                group->name = NULL;
 755                return ret;
 756        }
 757
 758        return 0;
 759}
 760EXPORT_SYMBOL_GPL(iommu_group_set_name);
 761
 762static int iommu_create_device_direct_mappings(struct iommu_group *group,
 763                                               struct device *dev)
 764{
 765        struct iommu_domain *domain = group->default_domain;
 766        struct iommu_resv_region *entry;
 767        struct list_head mappings;
 768        unsigned long pg_size;
 769        int ret = 0;
 770
 771        if (!domain || !iommu_is_dma_domain(domain))
 772                return 0;
 773
 774        BUG_ON(!domain->pgsize_bitmap);
 775
 776        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 777        INIT_LIST_HEAD(&mappings);
 778
 779        iommu_get_resv_regions(dev, &mappings);
 780
 781        /* We need to consider overlapping regions for different devices */
 782        list_for_each_entry(entry, &mappings, list) {
 783                dma_addr_t start, end, addr;
 784                size_t map_size = 0;
 785
 786                if (domain->ops->apply_resv_region)
 787                        domain->ops->apply_resv_region(dev, domain, entry);
 788
 789                start = ALIGN(entry->start, pg_size);
 790                end   = ALIGN(entry->start + entry->length, pg_size);
 791
 792                if (entry->type != IOMMU_RESV_DIRECT &&
 793                    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
 794                        continue;
 795
 796                for (addr = start; addr <= end; addr += pg_size) {
 797                        phys_addr_t phys_addr;
 798
 799                        if (addr == end)
 800                                goto map_end;
 801
 802                        phys_addr = iommu_iova_to_phys(domain, addr);
 803                        if (!phys_addr) {
 804                                map_size += pg_size;
 805                                continue;
 806                        }
 807
 808map_end:
 809                        if (map_size) {
 810                                ret = iommu_map(domain, addr - map_size,
 811                                                addr - map_size, map_size,
 812                                                entry->prot);
 813                                if (ret)
 814                                        goto out;
 815                                map_size = 0;
 816                        }
 817                }
 818
 819        }
 820
 821        iommu_flush_iotlb_all(domain);
 822
 823out:
 824        iommu_put_resv_regions(dev, &mappings);
 825
 826        return ret;
 827}
 828
 829static bool iommu_is_attach_deferred(struct iommu_domain *domain,
 830                                     struct device *dev)
 831{
 832        if (domain->ops->is_attach_deferred)
 833                return domain->ops->is_attach_deferred(domain, dev);
 834
 835        return false;
 836}
 837
 838/**
 839 * iommu_group_add_device - add a device to an iommu group
 840 * @group: the group into which to add the device (reference should be held)
 841 * @dev: the device
 842 *
 843 * This function is called by an iommu driver to add a device into a
 844 * group.  Adding a device increments the group reference count.
 845 */
 846int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 847{
 848        int ret, i = 0;
 849        struct group_device *device;
 850
 851        device = kzalloc(sizeof(*device), GFP_KERNEL);
 852        if (!device)
 853                return -ENOMEM;
 854
 855        device->dev = dev;
 856
 857        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
 858        if (ret)
 859                goto err_free_device;
 860
 861        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 862rename:
 863        if (!device->name) {
 864                ret = -ENOMEM;
 865                goto err_remove_link;
 866        }
 867
 868        ret = sysfs_create_link_nowarn(group->devices_kobj,
 869                                       &dev->kobj, device->name);
 870        if (ret) {
 871                if (ret == -EEXIST && i >= 0) {
 872                        /*
 873                         * Account for the slim chance of collision
 874                         * and append an instance to the name.
 875                         */
 876                        kfree(device->name);
 877                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
 878                                                 kobject_name(&dev->kobj), i++);
 879                        goto rename;
 880                }
 881                goto err_free_name;
 882        }
 883
 884        kobject_get(group->devices_kobj);
 885
 886        dev->iommu_group = group;
 887
 888        mutex_lock(&group->mutex);
 889        list_add_tail(&device->list, &group->devices);
 890        if (group->domain  && !iommu_is_attach_deferred(group->domain, dev))
 891                ret = __iommu_attach_device(group->domain, dev);
 892        mutex_unlock(&group->mutex);
 893        if (ret)
 894                goto err_put_group;
 895
 896        /* Notify any listeners about change to group. */
 897        blocking_notifier_call_chain(&group->notifier,
 898                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 899
 900        trace_add_device_to_group(group->id, dev);
 901
 902        dev_info(dev, "Adding to iommu group %d\n", group->id);
 903
 904        return 0;
 905
 906err_put_group:
 907        mutex_lock(&group->mutex);
 908        list_del(&device->list);
 909        mutex_unlock(&group->mutex);
 910        dev->iommu_group = NULL;
 911        kobject_put(group->devices_kobj);
 912        sysfs_remove_link(group->devices_kobj, device->name);
 913err_free_name:
 914        kfree(device->name);
 915err_remove_link:
 916        sysfs_remove_link(&dev->kobj, "iommu_group");
 917err_free_device:
 918        kfree(device);
 919        dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
 920        return ret;
 921}
 922EXPORT_SYMBOL_GPL(iommu_group_add_device);
 923
 924/**
  925 * iommu_group_remove_device - remove a device from its current group
 926 * @dev: device to be removed
 927 *
 928 * This function is called by an iommu driver to remove the device from
  929 * its current group.  This decrements the iommu group reference count.
 930 */
 931void iommu_group_remove_device(struct device *dev)
 932{
 933        struct iommu_group *group = dev->iommu_group;
 934        struct group_device *tmp_device, *device = NULL;
 935
 936        if (!group)
 937                return;
 938
 939        dev_info(dev, "Removing from iommu group %d\n", group->id);
 940
 941        /* Pre-notify listeners that a device is being removed. */
 942        blocking_notifier_call_chain(&group->notifier,
 943                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
 944
 945        mutex_lock(&group->mutex);
 946        list_for_each_entry(tmp_device, &group->devices, list) {
 947                if (tmp_device->dev == dev) {
 948                        device = tmp_device;
 949                        list_del(&device->list);
 950                        break;
 951                }
 952        }
 953        mutex_unlock(&group->mutex);
 954
 955        if (!device)
 956                return;
 957
 958        sysfs_remove_link(group->devices_kobj, device->name);
 959        sysfs_remove_link(&dev->kobj, "iommu_group");
 960
 961        trace_remove_device_from_group(group->id, dev);
 962
 963        kfree(device->name);
 964        kfree(device);
 965        dev->iommu_group = NULL;
 966        kobject_put(group->devices_kobj);
 967}
 968EXPORT_SYMBOL_GPL(iommu_group_remove_device);
 969
 970static int iommu_group_device_count(struct iommu_group *group)
 971{
 972        struct group_device *entry;
 973        int ret = 0;
 974
 975        list_for_each_entry(entry, &group->devices, list)
 976                ret++;
 977
 978        return ret;
 979}
 980
 981/**
 982 * iommu_group_for_each_dev - iterate over each device in the group
 983 * @group: the group
 984 * @data: caller opaque data to be passed to callback function
 985 * @fn: caller supplied callback function
 986 *
 987 * This function is called by group users to iterate over group devices.
 988 * Callers should hold a reference count to the group during callback.
 989 * The group->mutex is held across callbacks, which will block calls to
 990 * iommu_group_add/remove_device.
 991 */
 992static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
 993                                      int (*fn)(struct device *, void *))
 994{
 995        struct group_device *device;
 996        int ret = 0;
 997
 998        list_for_each_entry(device, &group->devices, list) {
 999                ret = fn(device->dev, data);
1000                if (ret)
1001                        break;
1002        }
1003        return ret;
1004}
1005
1006
1007int iommu_group_for_each_dev(struct iommu_group *group, void *data,
1008                             int (*fn)(struct device *, void *))
1009{
1010        int ret;
1011
1012        mutex_lock(&group->mutex);
1013        ret = __iommu_group_for_each_dev(group, data, fn);
1014        mutex_unlock(&group->mutex);
1015
1016        return ret;
1017}
1018EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
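
/*
 * Illustrative sketch (not part of this file): counting the devices in a
 * group via the iteration callback.
 */
#if 0
static int my_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would stop the iteration */
}

static int my_group_device_count(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, my_count_one);
	return count;
}
#endif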
1019
1020/**
1021 * iommu_group_get - Return the group for a device and increment reference
1022 * @dev: get the group that this device belongs to
1023 *
1024 * This function is called by iommu drivers and users to get the group
1025 * for the specified device.  If found, the group is returned and the group
 1026 * reference is incremented, else NULL is returned.
1027 */
1028struct iommu_group *iommu_group_get(struct device *dev)
1029{
1030        struct iommu_group *group = dev->iommu_group;
1031
1032        if (group)
1033                kobject_get(group->devices_kobj);
1034
1035        return group;
1036}
1037EXPORT_SYMBOL_GPL(iommu_group_get);
1038
1039/**
1040 * iommu_group_ref_get - Increment reference on a group
1041 * @group: the group to use, must not be NULL
1042 *
1043 * This function is called by iommu drivers to take additional references on an
1044 * existing group.  Returns the given group for convenience.
1045 */
1046struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
1047{
1048        kobject_get(group->devices_kobj);
1049        return group;
1050}
1051EXPORT_SYMBOL_GPL(iommu_group_ref_get);
1052
1053/**
1054 * iommu_group_put - Decrement group reference
1055 * @group: the group to use
1056 *
1057 * This function is called by iommu drivers and users to release the
1058 * iommu group.  Once the reference count is zero, the group is released.
1059 */
1060void iommu_group_put(struct iommu_group *group)
1061{
1062        if (group)
1063                kobject_put(group->devices_kobj);
1064}
1065EXPORT_SYMBOL_GPL(iommu_group_put);
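
/*
 * Illustrative sketch (not part of this file): looking up the group ID for a
 * device while holding a temporary group reference.
 */
#if 0
static int my_dev_group_id(struct device *dev)
{
	struct iommu_group *group = iommu_group_get(dev);
	int id;

	if (!group)
		return -ENODEV;

	id = iommu_group_id(group);
	iommu_group_put(group);
	return id;
}
#endif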
1066
1067/**
1068 * iommu_group_register_notifier - Register a notifier for group changes
1069 * @group: the group to watch
1070 * @nb: notifier block to signal
1071 *
1072 * This function allows iommu group users to track changes in a group.
1073 * See include/linux/iommu.h for actions sent via this notifier.  Caller
1074 * should hold a reference to the group throughout notifier registration.
1075 */
1076int iommu_group_register_notifier(struct iommu_group *group,
1077                                  struct notifier_block *nb)
1078{
1079        return blocking_notifier_chain_register(&group->notifier, nb);
1080}
1081EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
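
/*
 * Illustrative sketch (not part of this file): watching a group for device
 * additions and removals.  "my_group_nb" is a hypothetical notifier block.
 */
#if 0
static int my_group_notifier(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct device *dev = data;

	switch (action) {
	case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
		dev_info(dev, "added to watched iommu group\n");
		break;
	case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
		dev_info(dev, "removed from watched iommu group\n");
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_group_nb = {
	.notifier_call = my_group_notifier,
};

static int my_watch_group(struct iommu_group *group)
{
	return iommu_group_register_notifier(group, &my_group_nb);
}
#endif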
1082
1083/**
1084 * iommu_group_unregister_notifier - Unregister a notifier
1085 * @group: the group to watch
1086 * @nb: notifier block to signal
1087 *
1088 * Unregister a previously registered group notifier block.
1089 */
1090int iommu_group_unregister_notifier(struct iommu_group *group,
1091                                    struct notifier_block *nb)
1092{
1093        return blocking_notifier_chain_unregister(&group->notifier, nb);
1094}
1095EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
1096
1097/**
1098 * iommu_register_device_fault_handler() - Register a device fault handler
1099 * @dev: the device
1100 * @handler: the fault handler
1101 * @data: private data passed as argument to the handler
1102 *
1103 * When an IOMMU fault event is received, this handler gets called with the
1104 * fault event and data as argument. The handler should return 0 on success. If
1105 * the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
1106 * complete the fault by calling iommu_page_response() with one of the following
 1107 * response codes:
1108 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
1109 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
1110 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
1111 *   page faults if possible.
1112 *
1113 * Return 0 if the fault handler was installed successfully, or an error.
1114 */
1115int iommu_register_device_fault_handler(struct device *dev,
1116                                        iommu_dev_fault_handler_t handler,
1117                                        void *data)
1118{
1119        struct dev_iommu *param = dev->iommu;
1120        int ret = 0;
1121
1122        if (!param)
1123                return -EINVAL;
1124
1125        mutex_lock(&param->lock);
1126        /* Only allow one fault handler registered for each device */
1127        if (param->fault_param) {
1128                ret = -EBUSY;
1129                goto done_unlock;
1130        }
1131
1132        get_device(dev);
1133        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
1134        if (!param->fault_param) {
1135                put_device(dev);
1136                ret = -ENOMEM;
1137                goto done_unlock;
1138        }
1139        param->fault_param->handler = handler;
1140        param->fault_param->data = data;
1141        mutex_init(&param->fault_param->lock);
1142        INIT_LIST_HEAD(&param->fault_param->faults);
1143
1144done_unlock:
1145        mutex_unlock(&param->lock);
1146
1147        return ret;
1148}
1149EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
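
/*
 * Illustrative sketch (not part of this file): a consumer-side fault handler
 * that immediately completes recoverable page requests.  A real handler
 * would service the request (e.g. fault in the pages) before responding.
 * "my_handle_fault" and "my_enable_faults" are hypothetical.
 */
#if 0
static int my_handle_fault(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version	= IOMMU_PAGE_RESP_VERSION_1,
		.grpid		= fault->prm.grpid,
		.code		= IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return 0;	/* nothing to complete for unrecoverable faults */

	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid = fault->prm.pasid;
	}

	/* ... handle the request here, then complete it ... */
	return iommu_page_response(dev, &resp);
}

static int my_enable_faults(struct device *dev)
{
	return iommu_register_device_fault_handler(dev, my_handle_fault, dev);
}
#endif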
1150
1151/**
1152 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
1153 * @dev: the device
1154 *
1155 * Remove the device fault handler installed with
1156 * iommu_register_device_fault_handler().
1157 *
1158 * Return 0 on success, or an error.
1159 */
1160int iommu_unregister_device_fault_handler(struct device *dev)
1161{
1162        struct dev_iommu *param = dev->iommu;
1163        int ret = 0;
1164
1165        if (!param)
1166                return -EINVAL;
1167
1168        mutex_lock(&param->lock);
1169
1170        if (!param->fault_param)
1171                goto unlock;
1172
1173        /* we cannot unregister handler if there are pending faults */
1174        if (!list_empty(&param->fault_param->faults)) {
1175                ret = -EBUSY;
1176                goto unlock;
1177        }
1178
1179        kfree(param->fault_param);
1180        param->fault_param = NULL;
1181        put_device(dev);
1182unlock:
1183        mutex_unlock(&param->lock);
1184
1185        return ret;
1186}
1187EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
1188
1189/**
1190 * iommu_report_device_fault() - Report fault event to device driver
1191 * @dev: the device
1192 * @evt: fault event data
1193 *
1194 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
1195 * handler. When this function fails and the fault is recoverable, it is the
1196 * caller's responsibility to complete the fault.
1197 *
1198 * Return 0 on success, or an error.
1199 */
1200int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
1201{
1202        struct dev_iommu *param = dev->iommu;
1203        struct iommu_fault_event *evt_pending = NULL;
1204        struct iommu_fault_param *fparam;
1205        int ret = 0;
1206
1207        if (!param || !evt)
1208                return -EINVAL;
1209
1210        /* we only report device fault if there is a handler registered */
1211        mutex_lock(&param->lock);
1212        fparam = param->fault_param;
1213        if (!fparam || !fparam->handler) {
1214                ret = -EINVAL;
1215                goto done_unlock;
1216        }
1217
1218        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
1219            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
1220                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
1221                                      GFP_KERNEL);
1222                if (!evt_pending) {
1223                        ret = -ENOMEM;
1224                        goto done_unlock;
1225                }
1226                mutex_lock(&fparam->lock);
1227                list_add_tail(&evt_pending->list, &fparam->faults);
1228                mutex_unlock(&fparam->lock);
1229        }
1230
1231        ret = fparam->handler(&evt->fault, fparam->data);
1232        if (ret && evt_pending) {
1233                mutex_lock(&fparam->lock);
1234                list_del(&evt_pending->list);
1235                mutex_unlock(&fparam->lock);
1236                kfree(evt_pending);
1237        }
1238done_unlock:
1239        mutex_unlock(&param->lock);
1240        return ret;
1241}
1242EXPORT_SYMBOL_GPL(iommu_report_device_fault);
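
/*
 * Illustrative sketch (not part of this file): how an IOMMU driver's threaded
 * IRQ path might report a decoded page request.  The descriptor fields
 * (addr, pasid, grpid) are assumed to come from hardware-specific decoding.
 */
#if 0
static int my_report_page_request(struct device *dev, u64 addr, u32 pasid,
				  u32 grpid)
{
	struct iommu_fault_event event = {
		.fault = {
			.type = IOMMU_FAULT_PAGE_REQ,
			.prm = {
				.flags = IOMMU_FAULT_PAGE_REQUEST_PASID_VALID |
					 IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
				.pasid = pasid,
				.grpid = grpid,
				.perm = IOMMU_FAULT_PERM_READ,
				.addr = addr,
			},
		},
	};

	return iommu_report_device_fault(dev, &event);
}
#endif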
1243
1244int iommu_page_response(struct device *dev,
1245                        struct iommu_page_response *msg)
1246{
1247        bool needs_pasid;
1248        int ret = -EINVAL;
1249        struct iommu_fault_event *evt;
1250        struct iommu_fault_page_request *prm;
1251        struct dev_iommu *param = dev->iommu;
1252        bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
1253        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1254
1255        if (!domain || !domain->ops->page_response)
1256                return -ENODEV;
1257
1258        if (!param || !param->fault_param)
1259                return -EINVAL;
1260
1261        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
1262            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
1263                return -EINVAL;
1264
1265        /* Only send response if there is a fault report pending */
1266        mutex_lock(&param->fault_param->lock);
1267        if (list_empty(&param->fault_param->faults)) {
1268                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
1269                goto done_unlock;
1270        }
1271        /*
1272         * Check if we have a matching page request pending to respond,
1273         * otherwise return -EINVAL
1274         */
1275        list_for_each_entry(evt, &param->fault_param->faults, list) {
1276                prm = &evt->fault.prm;
1277                if (prm->grpid != msg->grpid)
1278                        continue;
1279
1280                /*
1281                 * If the PASID is required, the corresponding request is
1282                 * matched using the group ID, the PASID valid bit and the PASID
 1283                 * value. Otherwise only the group ID is used to match the
 1284                 * request and response.
1285                 */
1286                needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
1287                if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
1288                        continue;
1289
1290                if (!needs_pasid && has_pasid) {
1291                        /* No big deal, just clear it. */
1292                        msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
1293                        msg->pasid = 0;
1294                }
1295
1296                ret = domain->ops->page_response(dev, evt, msg);
1297                list_del(&evt->list);
1298                kfree(evt);
1299                break;
1300        }
1301
1302done_unlock:
1303        mutex_unlock(&param->fault_param->lock);
1304        return ret;
1305}
1306EXPORT_SYMBOL_GPL(iommu_page_response);
1307
1308/**
1309 * iommu_group_id - Return ID for a group
1310 * @group: the group to ID
1311 *
1312 * Return the unique ID for the group matching the sysfs group number.
1313 */
1314int iommu_group_id(struct iommu_group *group)
1315{
1316        return group->id;
1317}
1318EXPORT_SYMBOL_GPL(iommu_group_id);
1319
1320static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1321                                               unsigned long *devfns);
1322
1323/*
1324 * To consider a PCI device isolated, we require ACS to support Source
1325 * Validation, Request Redirection, Completer Redirection, and Upstream
1326 * Forwarding.  This effectively means that devices cannot spoof their
1327 * requester ID, requests and completions cannot be redirected, and all
1328 * transactions are forwarded upstream, even as it passes through a
1329 * bridge where the target device is downstream.
1330 */
1331#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
1332
1333/*
1334 * For multifunction devices which are not isolated from each other, find
1335 * all the other non-isolated functions and look for existing groups.  For
1336 * each function, we also need to look for aliases to or from other devices
1337 * that may already have a group.
1338 */
1339static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
1340                                                        unsigned long *devfns)
1341{
1342        struct pci_dev *tmp = NULL;
1343        struct iommu_group *group;
1344
1345        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
1346                return NULL;
1347
1348        for_each_pci_dev(tmp) {
1349                if (tmp == pdev || tmp->bus != pdev->bus ||
1350                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
1351                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
1352                        continue;
1353
1354                group = get_pci_alias_group(tmp, devfns);
1355                if (group) {
1356                        pci_dev_put(tmp);
1357                        return group;
1358                }
1359        }
1360
1361        return NULL;
1362}
1363
1364/*
1365 * Look for aliases to or from the given device for existing groups. DMA
1366 * aliases are only supported on the same bus, therefore the search
 1367 * space is quite small (especially since we're really only looking at PCIe
 1368 * devices, and therefore only expect multiple slots on the root complex or
1369 * downstream switch ports).  It's conceivable though that a pair of
1370 * multifunction devices could have aliases between them that would cause a
1371 * loop.  To prevent this, we use a bitmap to track where we've been.
1372 */
1373static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
1374                                               unsigned long *devfns)
1375{
1376        struct pci_dev *tmp = NULL;
1377        struct iommu_group *group;
1378
1379        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
1380                return NULL;
1381
1382        group = iommu_group_get(&pdev->dev);
1383        if (group)
1384                return group;
1385
1386        for_each_pci_dev(tmp) {
1387                if (tmp == pdev || tmp->bus != pdev->bus)
1388                        continue;
1389
1390                /* We alias them or they alias us */
1391                if (pci_devs_are_dma_aliases(pdev, tmp)) {
1392                        group = get_pci_alias_group(tmp, devfns);
1393                        if (group) {
1394                                pci_dev_put(tmp);
1395                                return group;
1396                        }
1397
1398                        group = get_pci_function_alias_group(tmp, devfns);
1399                        if (group) {
1400                                pci_dev_put(tmp);
1401                                return group;
1402                        }
1403                }
1404        }
1405
1406        return NULL;
1407}
1408
1409struct group_for_pci_data {
1410        struct pci_dev *pdev;
1411        struct iommu_group *group;
1412};
1413
1414/*
1415 * DMA alias iterator callback, return the last seen device.  Stop and return
1416 * the IOMMU group if we find one along the way.
1417 */
1418static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
1419{
1420        struct group_for_pci_data *data = opaque;
1421
1422        data->pdev = pdev;
1423        data->group = iommu_group_get(&pdev->dev);
1424
1425        return data->group != NULL;
1426}
1427
1428/*
1429 * Generic device_group call-back function. It just allocates one
1430 * iommu-group per device.
1431 */
1432struct iommu_group *generic_device_group(struct device *dev)
1433{
1434        return iommu_group_alloc();
1435}
1436EXPORT_SYMBOL_GPL(generic_device_group);
1437
1438/*
1439 * Use standard PCI bus topology, isolation features, and DMA alias quirks
1440 * to find or create an IOMMU group for a device.
1441 */
1442struct iommu_group *pci_device_group(struct device *dev)
1443{
1444        struct pci_dev *pdev = to_pci_dev(dev);
1445        struct group_for_pci_data data;
1446        struct pci_bus *bus;
1447        struct iommu_group *group = NULL;
1448        u64 devfns[4] = { 0 };
1449
1450        if (WARN_ON(!dev_is_pci(dev)))
1451                return ERR_PTR(-EINVAL);
1452
1453        /*
1454         * Find the upstream DMA alias for the device.  A device must not
1455         * be aliased due to topology in order to have its own IOMMU group.
1456         * If we find an alias along the way that already belongs to a
1457         * group, use it.
1458         */
1459        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
1460                return data.group;
1461
1462        pdev = data.pdev;
1463
1464        /*
1465         * Continue upstream from the point of minimum IOMMU granularity
1466         * due to aliases to the point where devices are protected from
1467         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
1468         * group, use it.
1469         */
1470        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
1471                if (!bus->self)
1472                        continue;
1473
1474                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
1475                        break;
1476
1477                pdev = bus->self;
1478
1479                group = iommu_group_get(&pdev->dev);
1480                if (group)
1481                        return group;
1482        }
1483
1484        /*
1485         * Look for existing groups on device aliases.  If we alias another
1486         * device or another device aliases us, use the same group.
1487         */
1488        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
1489        if (group)
1490                return group;
1491
1492        /*
1493         * Look for existing groups on non-isolated functions on the same
 1494 * slot and aliases of those functions, if any.  No need to clear
1495         * the search bitmap, the tested devfns are still valid.
1496         */
1497        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
1498        if (group)
1499                return group;
1500
1501        /* No shared group found, allocate new */
1502        return iommu_group_alloc();
1503}
1504EXPORT_SYMBOL_GPL(pci_device_group);
1505
1506/* Get the IOMMU group for device on fsl-mc bus */
1507struct iommu_group *fsl_mc_device_group(struct device *dev)
1508{
1509        struct device *cont_dev = fsl_mc_cont_dev(dev);
1510        struct iommu_group *group;
1511
1512        group = iommu_group_get(cont_dev);
1513        if (!group)
1514                group = iommu_group_alloc();
1515        return group;
1516}
1517EXPORT_SYMBOL_GPL(fsl_mc_device_group);
1518
1519static int iommu_get_def_domain_type(struct device *dev)
1520{
1521        const struct iommu_ops *ops = dev->bus->iommu_ops;
1522
1523        if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
1524                return IOMMU_DOMAIN_DMA;
1525
1526        if (ops->def_domain_type)
1527                return ops->def_domain_type(dev);
1528
1529        return 0;
1530}
1531
1532static int iommu_group_alloc_default_domain(struct bus_type *bus,
1533                                            struct iommu_group *group,
1534                                            unsigned int type)
1535{
1536        struct iommu_domain *dom;
1537
1538        dom = __iommu_domain_alloc(bus, type);
1539        if (!dom && type != IOMMU_DOMAIN_DMA) {
1540                dom = __iommu_domain_alloc(bus, IOMMU_DOMAIN_DMA);
1541                if (dom)
1542                        pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
1543                                type, group->name);
1544        }
1545
1546        if (!dom)
1547                return -ENOMEM;
1548
1549        group->default_domain = dom;
1550        if (!group->domain)
1551                group->domain = dom;
1552        return 0;
1553}
1554
1555static int iommu_alloc_default_domain(struct iommu_group *group,
1556                                      struct device *dev)
1557{
1558        unsigned int type;
1559
1560        if (group->default_domain)
1561                return 0;
1562
1563        type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type;
1564
1565        return iommu_group_alloc_default_domain(dev->bus, group, type);
1566}
1567
1568/**
1569 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
1570 * @dev: target device
1571 *
1572 * This function is intended to be called by IOMMU drivers and extended to
1573 * support common, bus-defined algorithms when determining or creating the
1574 * IOMMU group for a device.  On success, the caller will hold a reference
1575 * to the returned IOMMU group, which will already include the provided
1576 * device.  The reference should be released with iommu_group_put().
1577 */
1578static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1579{
1580        const struct iommu_ops *ops = dev->bus->iommu_ops;
1581        struct iommu_group *group;
1582        int ret;
1583
1584        group = iommu_group_get(dev);
1585        if (group)
1586                return group;
1587
1588        if (!ops)
1589                return ERR_PTR(-EINVAL);
1590
1591        group = ops->device_group(dev);
1592        if (WARN_ON_ONCE(group == NULL))
1593                return ERR_PTR(-EINVAL);
1594
1595        if (IS_ERR(group))
1596                return group;
1597
1598        ret = iommu_group_add_device(group, dev);
1599        if (ret)
1600                goto out_put_group;
1601
1602        return group;
1603
1604out_put_group:
1605        iommu_group_put(group);
1606
1607        return ERR_PTR(ret);
1608}
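
/*
 * Example (editor's sketch): the reference convention described above applies
 * to iommu_group_get() in general -- every successful lookup must be balanced
 * with iommu_group_put().  A hypothetical caller:
 */
static int example_show_group(struct device *dev)
{
        struct iommu_group *group = iommu_group_get(dev);

        if (!group)
                return -ENODEV;

        dev_info(dev, "member of iommu group %d\n", iommu_group_id(group));
        iommu_group_put(group);
        return 0;
}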
1609
1610struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
1611{
1612        return group->default_domain;
1613}
1614
1615static int probe_iommu_group(struct device *dev, void *data)
1616{
1617        struct list_head *group_list = data;
1618        struct iommu_group *group;
1619        int ret;
1620
1621        /* Device is probed already if in a group */
1622        group = iommu_group_get(dev);
1623        if (group) {
1624                iommu_group_put(group);
1625                return 0;
1626        }
1627
1628        ret = __iommu_probe_device(dev, group_list);
1629        if (ret == -ENODEV)
1630                ret = 0;
1631
1632        return ret;
1633}
1634
1635static int remove_iommu_group(struct device *dev, void *data)
1636{
1637        iommu_release_device(dev);
1638
1639        return 0;
1640}
1641
1642static int iommu_bus_notifier(struct notifier_block *nb,
1643                              unsigned long action, void *data)
1644{
1645        unsigned long group_action = 0;
1646        struct device *dev = data;
1647        struct iommu_group *group;
1648
1649        /*
1650         * ADD/DEL call into iommu driver ops if provided, which may
1651         * result in ADD/DEL notifiers to group->notifier
1652         */
1653        if (action == BUS_NOTIFY_ADD_DEVICE) {
1654                int ret;
1655
1656                ret = iommu_probe_device(dev);
1657                return (ret) ? NOTIFY_DONE : NOTIFY_OK;
1658        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
1659                iommu_release_device(dev);
1660                return NOTIFY_OK;
1661        }
1662
1663        /*
1664         * Remaining BUS_NOTIFYs get filtered and republished to the
1665         * group, if anyone is listening
1666         */
1667        group = iommu_group_get(dev);
1668        if (!group)
1669                return 0;
1670
1671        switch (action) {
1672        case BUS_NOTIFY_BIND_DRIVER:
1673                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
1674                break;
1675        case BUS_NOTIFY_BOUND_DRIVER:
1676                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
1677                break;
1678        case BUS_NOTIFY_UNBIND_DRIVER:
1679                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
1680                break;
1681        case BUS_NOTIFY_UNBOUND_DRIVER:
1682                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
1683                break;
1684        }
1685
1686        if (group_action)
1687                blocking_notifier_call_chain(&group->notifier,
1688                                             group_action, dev);
1689
1690        iommu_group_put(group);
1691        return 0;
1692}
1693
1694struct __group_domain_type {
1695        struct device *dev;
1696        unsigned int type;
1697};
1698
1699static int probe_get_default_domain_type(struct device *dev, void *data)
1700{
1701        struct __group_domain_type *gtype = data;
1702        unsigned int type = iommu_get_def_domain_type(dev);
1703
1704        if (type) {
1705                if (gtype->type && gtype->type != type) {
1706                        dev_warn(dev, "Device needs domain type %s, but device %s in the same iommu group requires type %s - using default\n",
1707                                 iommu_domain_type_str(type),
1708                                 dev_name(gtype->dev),
1709                                 iommu_domain_type_str(gtype->type));
1710                        gtype->type = 0;
1711                }
1712
1713                if (!gtype->dev) {
1714                        gtype->dev  = dev;
1715                        gtype->type = type;
1716                }
1717        }
1718
1719        return 0;
1720}
1721
1722static void probe_alloc_default_domain(struct bus_type *bus,
1723                                       struct iommu_group *group)
1724{
1725        struct __group_domain_type gtype;
1726
1727        memset(&gtype, 0, sizeof(gtype));
1728
1729        /* Ask for default domain requirements of all devices in the group */
1730        __iommu_group_for_each_dev(group, &gtype,
1731                                   probe_get_default_domain_type);
1732
1733        if (!gtype.type)
1734                gtype.type = iommu_def_domain_type;
1735
1736        iommu_group_alloc_default_domain(bus, group, gtype.type);
1737
1738}
1739
1740static int iommu_group_do_dma_attach(struct device *dev, void *data)
1741{
1742        struct iommu_domain *domain = data;
1743        int ret = 0;
1744
1745        if (!iommu_is_attach_deferred(domain, dev))
1746                ret = __iommu_attach_device(domain, dev);
1747
1748        return ret;
1749}
1750
1751static int __iommu_group_dma_attach(struct iommu_group *group)
1752{
1753        return __iommu_group_for_each_dev(group, group->default_domain,
1754                                          iommu_group_do_dma_attach);
1755}
1756
1757static int iommu_group_do_probe_finalize(struct device *dev, void *data)
1758{
1759        struct iommu_domain *domain = data;
1760
1761        if (domain->ops->probe_finalize)
1762                domain->ops->probe_finalize(dev);
1763
1764        return 0;
1765}
1766
1767static void __iommu_group_dma_finalize(struct iommu_group *group)
1768{
1769        __iommu_group_for_each_dev(group, group->default_domain,
1770                                   iommu_group_do_probe_finalize);
1771}
1772
1773static int iommu_do_create_direct_mappings(struct device *dev, void *data)
1774{
1775        struct iommu_group *group = data;
1776
1777        iommu_create_device_direct_mappings(group, dev);
1778
1779        return 0;
1780}
1781
1782static int iommu_group_create_direct_mappings(struct iommu_group *group)
1783{
1784        return __iommu_group_for_each_dev(group, group,
1785                                          iommu_do_create_direct_mappings);
1786}
1787
1788int bus_iommu_probe(struct bus_type *bus)
1789{
1790        struct iommu_group *group, *next;
1791        LIST_HEAD(group_list);
1792        int ret;
1793
1794        /*
1795         * This code-path does not allocate the default domain when
1796         * creating the iommu group, so do it after the groups are
1797         * created.
1798         */
1799        ret = bus_for_each_dev(bus, NULL, &group_list, probe_iommu_group);
1800        if (ret)
1801                return ret;
1802
1803        list_for_each_entry_safe(group, next, &group_list, entry) {
1804                /* Remove item from the list */
1805                list_del_init(&group->entry);
1806
1807                mutex_lock(&group->mutex);
1808
1809                /* Try to allocate default domain */
1810                probe_alloc_default_domain(bus, group);
1811
1812                if (!group->default_domain) {
1813                        mutex_unlock(&group->mutex);
1814                        continue;
1815                }
1816
1817                iommu_group_create_direct_mappings(group);
1818
1819                ret = __iommu_group_dma_attach(group);
1820
1821                mutex_unlock(&group->mutex);
1822
1823                if (ret)
1824                        break;
1825
1826                __iommu_group_dma_finalize(group);
1827        }
1828
1829        return ret;
1830}
1831
1832static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
1833{
1834        struct notifier_block *nb;
1835        int err;
1836
1837        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
1838        if (!nb)
1839                return -ENOMEM;
1840
1841        nb->notifier_call = iommu_bus_notifier;
1842
1843        err = bus_register_notifier(bus, nb);
1844        if (err)
1845                goto out_free;
1846
1847        err = bus_iommu_probe(bus);
1848        if (err)
1849                goto out_err;
1850
1851
1852        return 0;
1853
1854out_err:
1855        /* Clean up */
1856        bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
1857        bus_unregister_notifier(bus, nb);
1858
1859out_free:
1860        kfree(nb);
1861
1862        return err;
1863}
1864
1865/**
1866 * bus_set_iommu - set iommu-callbacks for the bus
1867 * @bus: bus.
1868 * @ops: the callbacks provided by the iommu-driver
1869 *
1870 * This function is called by an iommu driver to set the iommu methods
1871 * used for a particular bus. Drivers for devices on that bus can use
1872 * the iommu-api after these ops are registered.
1873 * This special function is needed because IOMMUs are usually devices on
1874 * the bus itself, so the iommu drivers are not initialized when the bus
1875 * is set up. With this function the iommu-driver can set the iommu-ops
1876 * afterwards.
1877 */
1878int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
1879{
1880        int err;
1881
1882        if (ops == NULL) {
1883                bus->iommu_ops = NULL;
1884                return 0;
1885        }
1886
1887        if (bus->iommu_ops != NULL)
1888                return -EBUSY;
1889
1890        bus->iommu_ops = ops;
1891
1892        /* Do IOMMU specific setup for this bus-type */
1893        err = iommu_bus_init(bus, ops);
1894        if (err)
1895                bus->iommu_ops = NULL;
1896
1897        return err;
1898}
1899EXPORT_SYMBOL_GPL(bus_set_iommu);
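
/*
 * Example (editor's sketch): an IOMMU driver registering its ops once its
 * hardware has probed.  platform_bus_type is the usual choice for SoC IOMMUs;
 * the wrapper name is hypothetical.  Checking iommu_present() first avoids
 * the -EBUSY return when another driver already owns the bus.
 */
static int my_iommu_register_ops(const struct iommu_ops *ops)
{
        if (iommu_present(&platform_bus_type))
                return 0;

        return bus_set_iommu(&platform_bus_type, ops);
}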
1900
1901bool iommu_present(struct bus_type *bus)
1902{
1903        return bus->iommu_ops != NULL;
1904}
1905EXPORT_SYMBOL_GPL(iommu_present);
1906
1907bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1908{
1909        if (!bus->iommu_ops || !bus->iommu_ops->capable)
1910                return false;
1911
1912        return bus->iommu_ops->capable(cap);
1913}
1914EXPORT_SYMBOL_GPL(iommu_capable);
1915
1916/**
1917 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1918 * @domain: iommu domain
1919 * @handler: fault handler
1920 * @token: user data, will be passed back to the fault handler
1921 *
1922 * This function should be used by IOMMU users which want to be notified
1923 * whenever an IOMMU fault happens.
1924 *
1925 * The fault handler itself should return 0 on success, and an appropriate
1926 * error code otherwise.
1927 */
1928void iommu_set_fault_handler(struct iommu_domain *domain,
1929                                        iommu_fault_handler_t handler,
1930                                        void *token)
1931{
1932        BUG_ON(!domain);
1933
1934        domain->handler = handler;
1935        domain->handler_token = token;
1936}
1937EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
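
/*
 * Example (editor's sketch): a fault handler matching iommu_fault_handler_t.
 * Returning -ENOSYS tells report_iommu_fault() (further below) to fall back
 * to the IOMMU driver's default behaviour; the names are hypothetical.
 */
static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
                            unsigned long iova, int flags, void *token)
{
        dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);
        return -ENOSYS;
}

static void my_install_fault_handler(struct iommu_domain *domain)
{
        iommu_set_fault_handler(domain, my_fault_handler, NULL);
}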
1938
1939static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1940                                                 unsigned type)
1941{
1942        struct iommu_domain *domain;
1943
1944        if (bus == NULL || bus->iommu_ops == NULL)
1945                return NULL;
1946
1947        domain = bus->iommu_ops->domain_alloc(type);
1948        if (!domain)
1949                return NULL;
1950
1951        domain->ops  = bus->iommu_ops;
1952        domain->type = type;
1953        /* Assume all sizes by default; the driver may override this later */
1954        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1955
1956        /* Temporarily avoid -EEXIST while drivers still get their own cookies */
1957        if (iommu_is_dma_domain(domain) && !domain->iova_cookie && iommu_get_dma_cookie(domain)) {
1958                iommu_domain_free(domain);
1959                domain = NULL;
1960        }
1961        return domain;
1962}
1963
1964struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1965{
1966        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1967}
1968EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1969
1970void iommu_domain_free(struct iommu_domain *domain)
1971{
1972        iommu_put_dma_cookie(domain);
1973        domain->ops->domain_free(domain);
1974}
1975EXPORT_SYMBOL_GPL(iommu_domain_free);
1976
1977static int __iommu_attach_device(struct iommu_domain *domain,
1978                                 struct device *dev)
1979{
1980        int ret;
1981
1982        if (unlikely(domain->ops->attach_dev == NULL))
1983                return -ENODEV;
1984
1985        ret = domain->ops->attach_dev(domain, dev);
1986        if (!ret)
1987                trace_attach_device_to_domain(dev);
1988        return ret;
1989}
1990
1991int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1992{
1993        struct iommu_group *group;
1994        int ret;
1995
1996        group = iommu_group_get(dev);
1997        if (!group)
1998                return -ENODEV;
1999
2000        /*
2001         * Lock the group to make sure the device-count doesn't
2002         * change while we are attaching
2003         */
2004        mutex_lock(&group->mutex);
2005        ret = -EINVAL;
2006        if (iommu_group_device_count(group) != 1)
2007                goto out_unlock;
2008
2009        ret = __iommu_attach_group(domain, group);
2010
2011out_unlock:
2012        mutex_unlock(&group->mutex);
2013        iommu_group_put(group);
2014
2015        return ret;
2016}
2017EXPORT_SYMBOL_GPL(iommu_attach_device);
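
/*
 * Example (editor's sketch): the typical life cycle of an unmanaged domain
 * for a caller that owns the whole group (e.g. passthrough-style users).
 * The IOVA, size and protection flags below are illustrative only.
 */
static int example_domain_lifecycle(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        ret = iommu_map(domain, 0x100000, paddr, SZ_2M,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        /* ... device DMA through the domain happens here ... */

        iommu_unmap(domain, 0x100000, SZ_2M);
out_detach:
        iommu_detach_device(domain, dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}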
2018
2019int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
2020{
2021        const struct iommu_ops *ops = domain->ops;
2022
2023        if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
2024                return __iommu_attach_device(domain, dev);
2025
2026        return 0;
2027}
2028
2029/*
2030 * Check flags and other user provided data for valid combinations. We also
2031 * make sure no reserved fields or unused flags are set. This ensures we do
2032 * not break userspace in the future when these fields or flags are used.
2033 */
2034static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
2035{
2036        u32 mask;
2037        int i;
2038
2039        if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
2040                return -EINVAL;
2041
2042        mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
2043        if (info->cache & ~mask)
2044                return -EINVAL;
2045
2046        if (info->granularity >= IOMMU_INV_GRANU_NR)
2047                return -EINVAL;
2048
2049        switch (info->granularity) {
2050        case IOMMU_INV_GRANU_ADDR:
2051                if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
2052                        return -EINVAL;
2053
2054                mask = IOMMU_INV_ADDR_FLAGS_PASID |
2055                        IOMMU_INV_ADDR_FLAGS_ARCHID |
2056                        IOMMU_INV_ADDR_FLAGS_LEAF;
2057
2058                if (info->granu.addr_info.flags & ~mask)
2059                        return -EINVAL;
2060                break;
2061        case IOMMU_INV_GRANU_PASID:
2062                mask = IOMMU_INV_PASID_FLAGS_PASID |
2063                        IOMMU_INV_PASID_FLAGS_ARCHID;
2064                if (info->granu.pasid_info.flags & ~mask)
2065                        return -EINVAL;
2066
2067                break;
2068        case IOMMU_INV_GRANU_DOMAIN:
2069                if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
2070                        return -EINVAL;
2071                break;
2072        default:
2073                return -EINVAL;
2074        }
2075
2076        /* Check reserved padding fields */
2077        for (i = 0; i < sizeof(info->padding); i++) {
2078                if (info->padding[i])
2079                        return -EINVAL;
2080        }
2081
2082        return 0;
2083}
2084
2085int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
2086                                void __user *uinfo)
2087{
2088        struct iommu_cache_invalidate_info inv_info = { 0 };
2089        u32 minsz;
2090        int ret;
2091
2092        if (unlikely(!domain->ops->cache_invalidate))
2093                return -ENODEV;
2094
2095        /*
2096         * No new fields can be added before the variable sized union; the
2097         * minimum size is the offset to the union.
2098         */
2099        minsz = offsetof(struct iommu_cache_invalidate_info, granu);
2100
2101        /* Copy minsz from user to get flags and argsz */
2102        if (copy_from_user(&inv_info, uinfo, minsz))
2103                return -EFAULT;
2104
2105        /* Fields before the variable size union are mandatory */
2106        if (inv_info.argsz < minsz)
2107                return -EINVAL;
2108
2109        /* PASID and address granu require additional info beyond minsz */
2110        if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
2111            inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
2112                return -EINVAL;
2113
2114        if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
2115            inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
2116                return -EINVAL;
2117
2118        /*
2119         * User might be using a newer UAPI header which has a larger data
2120         * size; we shall support the existing flags within the current
2121         * size. Copy the remaining user data _after_ minsz but not more
2122         * than the current kernel supported size.
2123         */
2124        if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
2125                           min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
2126                return -EFAULT;
2127
2128        /* Now the argsz is validated, check the content */
2129        ret = iommu_check_cache_invl_data(&inv_info);
2130        if (ret)
2131                return ret;
2132
2133        return domain->ops->cache_invalidate(domain, dev, &inv_info);
2134}
2135EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
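
/*
 * Example (editor's sketch): the argsz/minsz pattern used above, reduced to a
 * hypothetical structure.  Userspace built against an older header sets a
 * smaller argsz and the kernel only copies what both sides know about.
 */
struct example_uapi {
        __u32 argsz;            /* userspace sets this to sizeof() its view */
        __u32 flags;
        __u64 added_later;      /* field introduced by a newer UAPI revision */
};

static int example_copy_uapi(void __user *uptr, struct example_uapi *karg)
{
        u32 minsz = offsetofend(struct example_uapi, flags);

        if (copy_from_user(karg, uptr, minsz))
                return -EFAULT;
        if (karg->argsz < minsz)
                return -EINVAL;

        /* Copy the rest, but never more than the kernel's view of the struct */
        if (copy_from_user((void *)karg + minsz, uptr + minsz,
                           min_t(u32, karg->argsz, sizeof(*karg)) - minsz))
                return -EFAULT;

        return 0;
}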
2136
2137static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
2138{
2139        u64 mask;
2140        int i;
2141
2142        if (data->version != IOMMU_GPASID_BIND_VERSION_1)
2143                return -EINVAL;
2144
2145        /* Check the range of supported formats */
2146        if (data->format >= IOMMU_PASID_FORMAT_LAST)
2147                return -EINVAL;
2148
2149        /* Check all flags */
2150        mask = IOMMU_SVA_GPASID_VAL;
2151        if (data->flags & ~mask)
2152                return -EINVAL;
2153
2154        /* Check reserved padding fields */
2155        for (i = 0; i < sizeof(data->padding); i++) {
2156                if (data->padding[i])
2157                        return -EINVAL;
2158        }
2159
2160        return 0;
2161}
2162
2163static int iommu_sva_prepare_bind_data(void __user *udata,
2164                                       struct iommu_gpasid_bind_data *data)
2165{
2166        u32 minsz;
2167
2168        /*
2169         * No new fields can be added before the variable sized union; the
2170         * minimum size is the offset to the union.
2171         */
2172        minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
2173
2174        /* Copy minsz from user to get flags and argsz */
2175        if (copy_from_user(data, udata, minsz))
2176                return -EFAULT;
2177
2178        /* Fields before the variable size union are mandatory */
2179        if (data->argsz < minsz)
2180                return -EINVAL;
2181        /*
2182         * User might be using a newer UAPI header; we shall let the IOMMU vendor
2183         * driver decide on what size it needs. Since the guest PASID bind data
2184         * can be vendor specific, a larger argsz could be the result of an
2185         * extension for one vendor and should not affect another.
2186         * Copy the remaining user data _after_ minsz
2187         */
2188        if (copy_from_user((void *)data + minsz, udata + minsz,
2189                           min_t(u32, data->argsz, sizeof(*data)) - minsz))
2190                return -EFAULT;
2191
2192        return iommu_check_bind_data(data);
2193}
2194
2195int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
2196                               void __user *udata)
2197{
2198        struct iommu_gpasid_bind_data data = { 0 };
2199        int ret;
2200
2201        if (unlikely(!domain->ops->sva_bind_gpasid))
2202                return -ENODEV;
2203
2204        ret = iommu_sva_prepare_bind_data(udata, &data);
2205        if (ret)
2206                return ret;
2207
2208        return domain->ops->sva_bind_gpasid(domain, dev, &data);
2209}
2210EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
2211
2212int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2213                             ioasid_t pasid)
2214{
2215        if (unlikely(!domain->ops->sva_unbind_gpasid))
2216                return -ENODEV;
2217
2218        return domain->ops->sva_unbind_gpasid(dev, pasid);
2219}
2220EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
2221
2222int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
2223                                 void __user *udata)
2224{
2225        struct iommu_gpasid_bind_data data = { 0 };
2226        int ret;
2227
2228        if (unlikely(!domain->ops->sva_bind_gpasid))
2229                return -ENODEV;
2230
2231        ret = iommu_sva_prepare_bind_data(udata, &data);
2232        if (ret)
2233                return ret;
2234
2235        return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
2236}
2237EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
2238
2239static void __iommu_detach_device(struct iommu_domain *domain,
2240                                  struct device *dev)
2241{
2242        if (iommu_is_attach_deferred(domain, dev))
2243                return;
2244
2245        if (unlikely(domain->ops->detach_dev == NULL))
2246                return;
2247
2248        domain->ops->detach_dev(domain, dev);
2249        trace_detach_device_from_domain(dev);
2250}
2251
2252void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
2253{
2254        struct iommu_group *group;
2255
2256        group = iommu_group_get(dev);
2257        if (!group)
2258                return;
2259
2260        mutex_lock(&group->mutex);
2261        if (iommu_group_device_count(group) != 1) {
2262                WARN_ON(1);
2263                goto out_unlock;
2264        }
2265
2266        __iommu_detach_group(domain, group);
2267
2268out_unlock:
2269        mutex_unlock(&group->mutex);
2270        iommu_group_put(group);
2271}
2272EXPORT_SYMBOL_GPL(iommu_detach_device);
2273
2274struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
2275{
2276        struct iommu_domain *domain;
2277        struct iommu_group *group;
2278
2279        group = iommu_group_get(dev);
2280        if (!group)
2281                return NULL;
2282
2283        domain = group->domain;
2284
2285        iommu_group_put(group);
2286
2287        return domain;
2288}
2289EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
2290
2291/*
2292 * Fast path for IOMMU_DOMAIN_DMA implementations, which already provide their
2293 * own guarantees that the group and its default domain are valid and correct.
2294 */
2295struct iommu_domain *iommu_get_dma_domain(struct device *dev)
2296{
2297        return dev->iommu_group->default_domain;
2298}
2299
2300/*
2301 * IOMMU groups are really the natural working unit of the IOMMU, but
2302 * the IOMMU API works on domains and devices.  Bridge that gap by
2303 * iterating over the devices in a group.  Ideally we'd have a single
2304 * device which represents the requestor ID of the group, but we also
2305 * allow IOMMU drivers to create policy defined minimum sets, where
2306 * the physical hardware may be able to distinguish members, but we
2307 * wish to group them at a higher level (ex. untrusted multi-function
2308 * PCI devices).  Thus we attach each device.
2309 */
2310static int iommu_group_do_attach_device(struct device *dev, void *data)
2311{
2312        struct iommu_domain *domain = data;
2313
2314        return __iommu_attach_device(domain, dev);
2315}
2316
2317static int __iommu_attach_group(struct iommu_domain *domain,
2318                                struct iommu_group *group)
2319{
2320        int ret;
2321
2322        if (group->default_domain && group->domain != group->default_domain)
2323                return -EBUSY;
2324
2325        ret = __iommu_group_for_each_dev(group, domain,
2326                                         iommu_group_do_attach_device);
2327        if (ret == 0)
2328                group->domain = domain;
2329
2330        return ret;
2331}
2332
2333int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
2334{
2335        int ret;
2336
2337        mutex_lock(&group->mutex);
2338        ret = __iommu_attach_group(domain, group);
2339        mutex_unlock(&group->mutex);
2340
2341        return ret;
2342}
2343EXPORT_SYMBOL_GPL(iommu_attach_group);
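
/*
 * Example (editor's sketch): group-level attach as a VFIO-style consumer
 * might do it, assuming the domain was allocated for the right bus.
 */
static int example_use_group(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group = iommu_group_get(dev);
        int ret;

        if (!group)
                return -ENODEV;

        ret = iommu_attach_group(domain, group);
        if (ret)
                goto out_put;

        /* ... map and use the domain for every device in the group ... */

        iommu_detach_group(domain, group);
out_put:
        iommu_group_put(group);
        return ret;
}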
2344
2345static int iommu_group_do_detach_device(struct device *dev, void *data)
2346{
2347        struct iommu_domain *domain = data;
2348
2349        __iommu_detach_device(domain, dev);
2350
2351        return 0;
2352}
2353
2354static void __iommu_detach_group(struct iommu_domain *domain,
2355                                 struct iommu_group *group)
2356{
2357        int ret;
2358
2359        if (!group->default_domain) {
2360                __iommu_group_for_each_dev(group, domain,
2361                                           iommu_group_do_detach_device);
2362                group->domain = NULL;
2363                return;
2364        }
2365
2366        if (group->domain == group->default_domain)
2367                return;
2368
2369        /* Detach by re-attaching to the default domain */
2370        ret = __iommu_group_for_each_dev(group, group->default_domain,
2371                                         iommu_group_do_attach_device);
2372        if (ret != 0)
2373                WARN_ON(1);
2374        else
2375                group->domain = group->default_domain;
2376}
2377
2378void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
2379{
2380        mutex_lock(&group->mutex);
2381        __iommu_detach_group(domain, group);
2382        mutex_unlock(&group->mutex);
2383}
2384EXPORT_SYMBOL_GPL(iommu_detach_group);
2385
2386phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
2387{
2388        if (domain->type == IOMMU_DOMAIN_IDENTITY)
2389                return iova;
2390
2391        if (domain->type == IOMMU_DOMAIN_BLOCKED)
2392                return 0;
2393
2394        return domain->ops->iova_to_phys(domain, iova);
2395}
2396EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
2397
2398static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
2399                           phys_addr_t paddr, size_t size, size_t *count)
2400{
2401        unsigned int pgsize_idx, pgsize_idx_next;
2402        unsigned long pgsizes;
2403        size_t offset, pgsize, pgsize_next;
2404        unsigned long addr_merge = paddr | iova;
2405
2406        /* Page sizes supported by the hardware and small enough for @size */
2407        pgsizes = domain->pgsize_bitmap & GENMASK(__fls(size), 0);
2408
2409        /* Constrain the page sizes further based on the maximum alignment */
2410        if (likely(addr_merge))
2411                pgsizes &= GENMASK(__ffs(addr_merge), 0);
2412
2413        /* Make sure we have at least one suitable page size */
2414        BUG_ON(!pgsizes);
2415
2416        /* Pick the biggest page size remaining */
2417        pgsize_idx = __fls(pgsizes);
2418        pgsize = BIT(pgsize_idx);
2419        if (!count)
2420                return pgsize;
2421
2422        /* Find the next biggest support page size, if it exists */
2423        pgsizes = domain->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
2424        if (!pgsizes)
2425                goto out_set_count;
2426
2427        pgsize_idx_next = __ffs(pgsizes);
2428        pgsize_next = BIT(pgsize_idx_next);
2429
2430        /*
2431         * There's no point trying a bigger page size unless the virtual
2432         * and physical addresses are similarly offset within the larger page.
2433         */
2434        if ((iova ^ paddr) & (pgsize_next - 1))
2435                goto out_set_count;
2436
2437        /* Calculate the offset to the next page size alignment boundary */
2438        offset = pgsize_next - (addr_merge & (pgsize_next - 1));
2439
2440        /*
2441         * If size is big enough to accommodate the larger page, reduce
2442         * the number of smaller pages.
2443         */
2444        if (offset + pgsize_next <= size)
2445                size = offset;
2446
2447out_set_count:
2448        *count = size >> pgsize_idx;
2449        return pgsize;
2450}
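
/*
 * Worked example (editor's note): with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
 * iova = 0x10200000, paddr = 0x20200000 and size = 0x600000 (6 MiB), the size
 * and alignment constraints leave 4K and 2M as candidates, so pgsize = SZ_2M.
 * The next bigger supported size (1G) cannot be used for batching because
 * iova and paddr differ within a 1G page, so *count = size >> 21 = 3 and the
 * caller can map three 2 MiB pages in a single call.
 */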
2451
2452static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
2453                             phys_addr_t paddr, size_t size, int prot,
2454                             gfp_t gfp, size_t *mapped)
2455{
2456        const struct iommu_ops *ops = domain->ops;
2457        size_t pgsize, count;
2458        int ret;
2459
2460        pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
2461
2462        pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
2463                 iova, &paddr, pgsize, count);
2464
2465        if (ops->map_pages) {
2466                ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
2467                                     gfp, mapped);
2468        } else {
2469                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
2470                *mapped = ret ? 0 : pgsize;
2471        }
2472
2473        return ret;
2474}
2475
2476static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
2477                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2478{
2479        const struct iommu_ops *ops = domain->ops;
2480        unsigned long orig_iova = iova;
2481        unsigned int min_pagesz;
2482        size_t orig_size = size;
2483        phys_addr_t orig_paddr = paddr;
2484        int ret = 0;
2485
2486        if (unlikely(!(ops->map || ops->map_pages) ||
2487                     domain->pgsize_bitmap == 0UL))
2488                return -ENODEV;
2489
2490        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2491                return -EINVAL;
2492
2493        /* find out the minimum page size supported */
2494        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2495
2496        /*
2497         * both the virtual address and the physical one, as well as
2498         * the size of the mapping, must be aligned (at least) to the
2499         * size of the smallest page supported by the hardware
2500         */
2501        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2502                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
2503                       iova, &paddr, size, min_pagesz);
2504                return -EINVAL;
2505        }
2506
2507        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
2508
2509        while (size) {
2510                size_t mapped = 0;
2511
2512                ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
2513                                        &mapped);
2514                /*
2515                 * Some pages may have been mapped, even if an error occurred,
2516                 * so we should account for those so they can be unmapped.
2517                 */
2518                size -= mapped;
2519
2520                if (ret)
2521                        break;
2522
2523                iova += mapped;
2524                paddr += mapped;
2525        }
2526
2527        /* unroll mapping in case something went wrong */
2528        if (ret)
2529                iommu_unmap(domain, orig_iova, orig_size - size);
2530        else
2531                trace_map(orig_iova, orig_paddr, orig_size);
2532
2533        return ret;
2534}
2535
2536static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
2537                      phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
2538{
2539        const struct iommu_ops *ops = domain->ops;
2540        int ret;
2541
2542        ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
2543        if (ret == 0 && ops->iotlb_sync_map)
2544                ops->iotlb_sync_map(domain, iova, size);
2545
2546        return ret;
2547}
2548
2549int iommu_map(struct iommu_domain *domain, unsigned long iova,
2550              phys_addr_t paddr, size_t size, int prot)
2551{
2552        might_sleep();
2553        return _iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
2554}
2555EXPORT_SYMBOL_GPL(iommu_map);
2556
2557int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
2558              phys_addr_t paddr, size_t size, int prot)
2559{
2560        return _iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
2561}
2562EXPORT_SYMBOL_GPL(iommu_map_atomic);
2563
2564static size_t __iommu_unmap_pages(struct iommu_domain *domain,
2565                                  unsigned long iova, size_t size,
2566                                  struct iommu_iotlb_gather *iotlb_gather)
2567{
2568        const struct iommu_ops *ops = domain->ops;
2569        size_t pgsize, count;
2570
2571        pgsize = iommu_pgsize(domain, iova, iova, size, &count);
2572        return ops->unmap_pages ?
2573               ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
2574               ops->unmap(domain, iova, pgsize, iotlb_gather);
2575}
2576
2577static size_t __iommu_unmap(struct iommu_domain *domain,
2578                            unsigned long iova, size_t size,
2579                            struct iommu_iotlb_gather *iotlb_gather)
2580{
2581        const struct iommu_ops *ops = domain->ops;
2582        size_t unmapped_page, unmapped = 0;
2583        unsigned long orig_iova = iova;
2584        unsigned int min_pagesz;
2585
2586        if (unlikely(!(ops->unmap || ops->unmap_pages) ||
2587                     domain->pgsize_bitmap == 0UL))
2588                return 0;
2589
2590        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
2591                return 0;
2592
2593        /* find out the minimum page size supported */
2594        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
2595
2596        /*
2597         * The virtual address, as well as the size of the mapping, must be
2598         * aligned (at least) to the size of the smallest page supported
2599         * by the hardware
2600         */
2601        if (!IS_ALIGNED(iova | size, min_pagesz)) {
2602                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
2603                       iova, size, min_pagesz);
2604                return 0;
2605        }
2606
2607        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
2608
2609        /*
2610         * Keep iterating until we either unmap 'size' bytes (or more)
2611         * or we hit an area that isn't mapped.
2612         */
2613        while (unmapped < size) {
2614                unmapped_page = __iommu_unmap_pages(domain, iova,
2615                                                    size - unmapped,
2616                                                    iotlb_gather);
2617                if (!unmapped_page)
2618                        break;
2619
2620                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2621                         iova, unmapped_page);
2622
2623                iova += unmapped_page;
2624                unmapped += unmapped_page;
2625        }
2626
2627        trace_unmap(orig_iova, size, unmapped);
2628        return unmapped;
2629}
2630
2631size_t iommu_unmap(struct iommu_domain *domain,
2632                   unsigned long iova, size_t size)
2633{
2634        struct iommu_iotlb_gather iotlb_gather;
2635        size_t ret;
2636
2637        iommu_iotlb_gather_init(&iotlb_gather);
2638        ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2639        iommu_iotlb_sync(domain, &iotlb_gather);
2640
2641        return ret;
2642}
2643EXPORT_SYMBOL_GPL(iommu_unmap);
2644
2645size_t iommu_unmap_fast(struct iommu_domain *domain,
2646                        unsigned long iova, size_t size,
2647                        struct iommu_iotlb_gather *iotlb_gather)
2648{
2649        return __iommu_unmap(domain, iova, size, iotlb_gather);
2650}
2651EXPORT_SYMBOL_GPL(iommu_unmap_fast);
2652
2653static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2654                struct scatterlist *sg, unsigned int nents, int prot,
2655                gfp_t gfp)
2656{
2657        const struct iommu_ops *ops = domain->ops;
2658        size_t len = 0, mapped = 0;
2659        phys_addr_t start;
2660        unsigned int i = 0;
2661        int ret;
2662
2663        while (i <= nents) {
2664                phys_addr_t s_phys = sg_phys(sg);
2665
2666                if (len && s_phys != start + len) {
2667                        ret = __iommu_map(domain, iova + mapped, start,
2668                                        len, prot, gfp);
2669
2670                        if (ret)
2671                                goto out_err;
2672
2673                        mapped += len;
2674                        len = 0;
2675                }
2676
2677                if (len) {
2678                        len += sg->length;
2679                } else {
2680                        len = sg->length;
2681                        start = s_phys;
2682                }
2683
2684                if (++i < nents)
2685                        sg = sg_next(sg);
2686        }
2687
2688        if (ops->iotlb_sync_map)
2689                ops->iotlb_sync_map(domain, iova, mapped);
2690        return mapped;
2691
2692out_err:
2693        /* undo mappings already done */
2694        iommu_unmap(domain, iova, mapped);
2695
2696        return ret;
2697}
2698
2699ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2700                     struct scatterlist *sg, unsigned int nents, int prot)
2701{
2702        might_sleep();
2703        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2704}
2705EXPORT_SYMBOL_GPL(iommu_map_sg);
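
/*
 * Example (editor's sketch): mapping a set of pages through a scatterlist.
 * iommu_map_sg() returns the number of bytes mapped on success or a negative
 * errno; the helper name and protection flags are illustrative.
 */
static ssize_t example_map_pages(struct iommu_domain *domain,
                                 unsigned long iova, struct page **pages,
                                 unsigned int npages)
{
        struct sg_table sgt;
        ssize_t mapped;
        int ret;

        ret = sg_alloc_table_from_pages(&sgt, pages, npages, 0,
                                        (unsigned long)npages * PAGE_SIZE,
                                        GFP_KERNEL);
        if (ret)
                return ret;

        mapped = iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents,
                              IOMMU_READ | IOMMU_WRITE);
        sg_free_table(&sgt);
        return mapped;
}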
2706
2707ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2708                    struct scatterlist *sg, unsigned int nents, int prot)
2709{
2710        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2711}
2712
2713/**
2714 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2715 * @domain: the iommu domain where the fault has happened
2716 * @dev: the device where the fault has happened
2717 * @iova: the faulting address
2718 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2719 *
2720 * This function should be called by the low-level IOMMU implementations
2721 * whenever IOMMU faults happen, to allow high-level users, that are
2722 * interested in such events, to know about them.
2723 *
2724 * This event may be useful for several possible use cases:
2725 * - mere logging of the event
2726 * - dynamic TLB/PTE loading
2727 * - if restarting of the faulting device is required
2728 *
2729 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2730 * PTE/TLB loading will one day be supported, implementations will be able
2731 * to tell whether it succeeded or not according to this return value).
2732 *
2733 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2734 * (though fault handlers can also return -ENOSYS, in case they want to
2735 * elicit the default behavior of the IOMMU drivers).
2736 */
2737int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2738                       unsigned long iova, int flags)
2739{
2740        int ret = -ENOSYS;
2741
2742        /*
2743         * if upper layers showed interest and installed a fault handler,
2744         * invoke it.
2745         */
2746        if (domain->handler)
2747                ret = domain->handler(domain, dev, iova, flags,
2748                                                domain->handler_token);
2749
2750        trace_io_page_fault(dev, iova, flags);
2751        return ret;
2752}
2753EXPORT_SYMBOL_GPL(report_iommu_fault);
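
/*
 * Example (editor's sketch): how a low-level IOMMU driver might use
 * report_iommu_fault() from its fault path.  The policy applied when no
 * handler resolves the fault is driver specific; this helper is hypothetical.
 */
static int my_iommu_handle_fault(struct iommu_domain *domain,
                                 struct device *dev, unsigned long iova)
{
        /* Give interested upper layers a chance to fix things up first */
        if (!report_iommu_fault(domain, dev, iova, IOMMU_FAULT_READ))
                return 0;

        dev_err(dev, "unhandled iommu fault at iova 0x%lx\n", iova);
        return -EFAULT;
}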
2754
2755static int __init iommu_init(void)
2756{
2757        iommu_group_kset = kset_create_and_add("iommu_groups",
2758                                               NULL, kernel_kobj);
2759        BUG_ON(!iommu_group_kset);
2760
2761        iommu_debugfs_setup();
2762
2763        return 0;
2764}
2765core_initcall(iommu_init);
2766
2767int iommu_enable_nesting(struct iommu_domain *domain)
2768{
2769        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2770                return -EINVAL;
2771        if (!domain->ops->enable_nesting)
2772                return -EINVAL;
2773        return domain->ops->enable_nesting(domain);
2774}
2775EXPORT_SYMBOL_GPL(iommu_enable_nesting);
2776
2777int iommu_set_pgtable_quirks(struct iommu_domain *domain,
2778                unsigned long quirk)
2779{
2780        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
2781                return -EINVAL;
2782        if (!domain->ops->set_pgtable_quirks)
2783                return -EINVAL;
2784        return domain->ops->set_pgtable_quirks(domain, quirk);
2785}
2786EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
2787
2788void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2789{
2790        const struct iommu_ops *ops = dev->bus->iommu_ops;
2791
2792        if (ops && ops->get_resv_regions)
2793                ops->get_resv_regions(dev, list);
2794}
2795
2796void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2797{
2798        const struct iommu_ops *ops = dev->bus->iommu_ops;
2799
2800        if (ops && ops->put_resv_regions)
2801                ops->put_resv_regions(dev, list);
2802}
2803
2804/**
2805 * generic_iommu_put_resv_regions - Reserved region driver helper
2806 * @dev: device for which to free reserved regions
2807 * @list: reserved region list for device
2808 *
2809 * IOMMU drivers can use this to implement their .put_resv_regions() callback
2810 * for simple reservations. Memory allocated for each reserved region will be
2811 * freed. If an IOMMU driver allocates additional resources per region, it is
2812 * going to have to implement a custom callback.
2813 */
2814void generic_iommu_put_resv_regions(struct device *dev, struct list_head *list)
2815{
2816        struct iommu_resv_region *entry, *next;
2817
2818        list_for_each_entry_safe(entry, next, list, list)
2819                kfree(entry);
2820}
2821EXPORT_SYMBOL(generic_iommu_put_resv_regions);
2822
2823struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2824                                                  size_t length, int prot,
2825                                                  enum iommu_resv_type type)
2826{
2827        struct iommu_resv_region *region;
2828
2829        region = kzalloc(sizeof(*region), GFP_KERNEL);
2830        if (!region)
2831                return NULL;
2832
2833        INIT_LIST_HEAD(&region->list);
2834        region->start = start;
2835        region->length = length;
2836        region->prot = prot;
2837        region->type = type;
2838        return region;
2839}
2840EXPORT_SYMBOL_GPL(iommu_alloc_resv_region);
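
/*
 * Example (editor's sketch): a driver-side ->get_resv_regions() callback that
 * reserves a software-managed MSI window.  The window below is illustrative;
 * the matching ->put_resv_regions() can simply point at
 * generic_iommu_put_resv_regions().
 */
#define MY_MSI_IOVA_BASE        0x8000000UL     /* hypothetical */
#define MY_MSI_IOVA_LENGTH      0x100000UL      /* hypothetical */

static void my_iommu_get_resv_regions(struct device *dev,
                                      struct list_head *head)
{
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(MY_MSI_IOVA_BASE, MY_MSI_IOVA_LENGTH,
                                         prot, IOMMU_RESV_SW_MSI);
        if (!region)
                return;

        list_add_tail(&region->list, head);
}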
2841
2842void iommu_set_default_passthrough(bool cmd_line)
2843{
2844        if (cmd_line)
2845                iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2846        iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2847}
2848
2849void iommu_set_default_translated(bool cmd_line)
2850{
2851        if (cmd_line)
2852                iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
2853        iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2854}
2855
2856bool iommu_default_passthrough(void)
2857{
2858        return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2859}
2860EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2861
2862const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2863{
2864        const struct iommu_ops *ops = NULL;
2865        struct iommu_device *iommu;
2866
2867        spin_lock(&iommu_device_lock);
2868        list_for_each_entry(iommu, &iommu_device_list, list)
2869                if (iommu->fwnode == fwnode) {
2870                        ops = iommu->ops;
2871                        break;
2872                }
2873        spin_unlock(&iommu_device_lock);
2874        return ops;
2875}
2876
2877int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2878                      const struct iommu_ops *ops)
2879{
2880        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2881
2882        if (fwspec)
2883                return ops == fwspec->ops ? 0 : -EINVAL;
2884
2885        if (!dev_iommu_get(dev))
2886                return -ENOMEM;
2887
2888        /* Preallocate for the overwhelmingly common case of 1 ID */
2889        fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
2890        if (!fwspec)
2891                return -ENOMEM;
2892
2893        of_node_get(to_of_node(iommu_fwnode));
2894        fwspec->iommu_fwnode = iommu_fwnode;
2895        fwspec->ops = ops;
2896        dev_iommu_fwspec_set(dev, fwspec);
2897        return 0;
2898}
2899EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2900
2901void iommu_fwspec_free(struct device *dev)
2902{
2903        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2904
2905        if (fwspec) {
2906                fwnode_handle_put(fwspec->iommu_fwnode);
2907                kfree(fwspec);
2908                dev_iommu_fwspec_set(dev, NULL);
2909        }
2910}
2911EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2912
2913int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2914{
2915        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2916        int i, new_num;
2917
2918        if (!fwspec)
2919                return -EINVAL;
2920
2921        new_num = fwspec->num_ids + num_ids;
2922        if (new_num > 1) {
2923                fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
2924                                  GFP_KERNEL);
2925                if (!fwspec)
2926                        return -ENOMEM;
2927
2928                dev_iommu_fwspec_set(dev, fwspec);
2929        }
2930
2931        for (i = 0; i < num_ids; i++)
2932                fwspec->ids[fwspec->num_ids + i] = ids[i];
2933
2934        fwspec->num_ids = new_num;
2935        return 0;
2936}
2937EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
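
/*
 * Example (editor's sketch): firmware glue (OF/ACPI) describes a DMA master
 * to its IOMMU by initialising a fwspec and recording the IDs parsed from the
 * firmware tables.  The helper name and single-ID case are illustrative.
 */
static int example_fwspec_setup(struct device *dev,
                                struct fwnode_handle *iommu_fwnode,
                                const struct iommu_ops *ops, u32 sid)
{
        int ret;

        ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
        if (ret)
                return ret;

        return iommu_fwspec_add_ids(dev, &sid, 1);
}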
2938
2939/*
2940 * Per device IOMMU features.
2941 */
2942int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2943{
2944        if (dev->iommu && dev->iommu->iommu_dev) {
2945                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2946
2947                if (ops->dev_enable_feat)
2948                        return ops->dev_enable_feat(dev, feat);
2949        }
2950
2951        return -ENODEV;
2952}
2953EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2954
2955/*
2956 * The device drivers should do the necessary cleanups before calling this.
2957 * For example, before disabling the aux-domain feature, the device driver
2958 * should detach all aux-domains. Otherwise, this will return -EBUSY.
2959 */
2960int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2961{
2962        if (dev->iommu && dev->iommu->iommu_dev) {
2963                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2964
2965                if (ops->dev_disable_feat)
2966                        return ops->dev_disable_feat(dev, feat);
2967        }
2968
2969        return -EBUSY;
2970}
2971EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2972
2973bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2974{
2975        if (dev->iommu && dev->iommu->iommu_dev) {
2976                const struct iommu_ops *ops = dev->iommu->iommu_dev->ops;
2977
2978                if (ops->dev_feat_enabled)
2979                        return ops->dev_feat_enabled(dev, feat);
2980        }
2981
2982        return false;
2983}
2984EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2985
2986/*
2987 * Aux-domain specific attach/detach.
2988 *
2989 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2990 * true. Also, as long as domains are attached to a device through this
2991 * interface, any attempt to call iommu_attach_device() should fail
2992 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2993 * This should make us safe against a device being attached to a guest as a
2994 * whole while there are still pasid users on it (aux and sva).
2995 */
2996int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2997{
2998        int ret = -ENODEV;
2999
3000        if (domain->ops->aux_attach_dev)
3001                ret = domain->ops->aux_attach_dev(domain, dev);
3002
3003        if (!ret)
3004                trace_attach_device_to_domain(dev);
3005
3006        return ret;
3007}
3008EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
3009
3010void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
3011{
3012        if (domain->ops->aux_detach_dev) {
3013                domain->ops->aux_detach_dev(domain, dev);
3014                trace_detach_device_from_domain(dev);
3015        }
3016}
3017EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
3018
3019int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
3020{
3021        int ret = -ENODEV;
3022
3023        if (domain->ops->aux_get_pasid)
3024                ret = domain->ops->aux_get_pasid(domain, dev);
3025
3026        return ret;
3027}
3028EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
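
/*
 * Example (editor's sketch): aux-domain usage as a mediated-device style
 * consumer might implement it, once IOMMU_DEV_FEAT_AUX has been enabled on
 * the physical device.  Programming the PASID into the device is left out.
 */
static int example_aux_attach(struct iommu_domain *domain, struct device *dev)
{
        int ret, pasid;

        if (!iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
                return -ENODEV;

        ret = iommu_aux_attach_device(domain, dev);
        if (ret)
                return ret;

        pasid = iommu_aux_get_pasid(domain, dev);
        if (pasid < 0) {
                iommu_aux_detach_device(domain, dev);
                return pasid;
        }

        /* program @pasid into the device's PASID table entry here */
        return 0;
}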
3029
3030/**
3031 * iommu_sva_bind_device() - Bind a process address space to a device
3032 * @dev: the device
3033 * @mm: the mm to bind, caller must hold a reference to it
3034 *
3035 * Create a bond between device and address space, allowing the device to access
3036 * the mm using the returned PASID. If a bond already exists between @dev and
3037 * @mm, it is returned and an additional reference is taken. Caller must call
3038 * iommu_sva_unbind_device() to release each reference.
3039 *
3040 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
3041 * initialize the required SVA features.
3042 *
3043 * On error, returns an ERR_PTR value.
3044 */
3045struct iommu_sva *
3046iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
3047{
3048        struct iommu_group *group;
3049        struct iommu_sva *handle = ERR_PTR(-EINVAL);
3050        const struct iommu_ops *ops = dev->bus->iommu_ops;
3051
3052        if (!ops || !ops->sva_bind)
3053                return ERR_PTR(-ENODEV);
3054
3055        group = iommu_group_get(dev);
3056        if (!group)
3057                return ERR_PTR(-ENODEV);
3058
3059        /* Ensure device count and domain don't change while we're binding */
3060        mutex_lock(&group->mutex);
3061
3062        /*
3063         * To keep things simple, SVA currently doesn't support IOMMU groups
3064         * with more than one device. Existing SVA-capable systems are not
3065         * affected by the problems that required IOMMU groups (lack of ACS
3066         * isolation, device ID aliasing and other hardware issues).
3067         */
3068        if (iommu_group_device_count(group) != 1)
3069                goto out_unlock;
3070
3071        handle = ops->sva_bind(dev, mm, drvdata);
3072
3073out_unlock:
3074        mutex_unlock(&group->mutex);
3075        iommu_group_put(group);
3076
3077        return handle;
3078}
3079EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
3080
3081/**
3082 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
3083 * @handle: the handle returned by iommu_sva_bind_device()
3084 *
3085 * Put reference to a bond between device and address space. The device should
3086 * not be issuing any more transaction for this PASID. All outstanding page
3087 * requests for this PASID must have been flushed to the IOMMU.
3088 */
3089void iommu_sva_unbind_device(struct iommu_sva *handle)
3090{
3091        struct iommu_group *group;
3092        struct device *dev = handle->dev;
3093        const struct iommu_ops *ops = dev->bus->iommu_ops;
3094
3095        if (!ops || !ops->sva_unbind)
3096                return;
3097
3098        group = iommu_group_get(dev);
3099        if (!group)
3100                return;
3101
3102        mutex_lock(&group->mutex);
3103        ops->sva_unbind(handle);
3104        mutex_unlock(&group->mutex);
3105
3106        iommu_group_put(group);
3107}
3108EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
3109
3110u32 iommu_sva_get_pasid(struct iommu_sva *handle)
3111{
3112        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
3113
3114        if (!ops || !ops->sva_get_pasid)
3115                return IOMMU_PASID_INVALID;
3116
3117        return ops->sva_get_pasid(handle);
3118}
3119EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
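
/*
 * Example (editor's sketch): SVA usage from a device driver that has already
 * called iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA).  Submitting work
 * tagged with the PASID is device specific and omitted here.
 */
static int example_sva_use(struct device *dev)
{
        struct iommu_sva *handle;
        u32 pasid;

        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(handle);
                return -ENODEV;
        }

        /* ... queue device work on current->mm using @pasid ... */

        iommu_sva_unbind_device(handle);
        return 0;
}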
3120
3121/*
3122 * Changes the default domain of an iommu group that has *only* one device
3123 *
3124 * @group: The group for which the default domain should be changed
3125 * @prev_dev: The device in the group (this is used to make sure that the device
3126 *       hasn't changed after the caller has called this function)
3127 * @type: The type of the new default domain that gets associated with the group
3128 *
3129 * Returns 0 on success and error code on failure
3130 *
3131 * Note:
3132 * 1. Presently, this function is called only when user requests to change the
3133 *    group's default domain type through /sys/kernel/iommu_groups/<grp_id>/type
3134 *    Please take a closer look if intended to use for other purposes.
3135 */
3136static int iommu_change_dev_def_domain(struct iommu_group *group,
3137                                       struct device *prev_dev, int type)
3138{
3139        struct iommu_domain *prev_dom;
3140        struct group_device *grp_dev;
3141        int ret, dev_def_dom;
3142        struct device *dev;
3143
3144        mutex_lock(&group->mutex);
3145
3146        if (group->default_domain != group->domain) {
3147                dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n");
3148                ret = -EBUSY;
3149                goto out;
3150        }
3151
3152        /*
3153         * iommu group wasn't locked while acquiring device lock in
3154         * iommu_group_store_type(). So, make sure that the device count hasn't
3155         * changed while acquiring device lock.
3156         *
3157         * Changing default domain of an iommu group with two or more devices
3158         * isn't supported because there could be a potential deadlock. Consider
3159         * the following scenario. T1 is trying to acquire device locks of all
3160         * the devices in the group and before it could acquire all of them,
3161         * there could be another thread T2 (from different sub-system and use
3162         * case) that has already acquired some of the device locks and might be
3163         * waiting for T1 to release other device locks.
3164         */
3165        if (iommu_group_device_count(group) != 1) {
3166                dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n");
3167                ret = -EINVAL;
3168                goto out;
3169        }
3170
3171        /* Since group has only one device */
3172        grp_dev = list_first_entry(&group->devices, struct group_device, list);
3173        dev = grp_dev->dev;
3174
3175        if (prev_dev != dev) {
3176                dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n");
3177                ret = -EBUSY;
3178                goto out;
3179        }
3180
3181        prev_dom = group->default_domain;
3182        if (!prev_dom) {
3183                ret = -EINVAL;
3184                goto out;
3185        }
3186
3187        dev_def_dom = iommu_get_def_domain_type(dev);
3188        if (!type) {
3189                /*
3190                 * If the user hasn't requested any specific type of domain and
3191                 * if the device supports both the domains, then default to the
3192                 * domain the device was booted with
3193                 */
3194                type = dev_def_dom ? : iommu_def_domain_type;
3195        } else if (dev_def_dom && type != dev_def_dom) {
3196                dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n",
3197                                    iommu_domain_type_str(type));
3198                ret = -EINVAL;
3199                goto out;
3200        }
3201
3202        /*
3203         * Switch to a new domain only if the requested domain type is different
3204         * from the existing default domain type
3205         */
3206        if (prev_dom->type == type) {
3207                ret = 0;
3208                goto out;
3209        }
3210
3211        /* We can bring up a flush queue without tearing down the domain */
3212        if (type == IOMMU_DOMAIN_DMA_FQ && prev_dom->type == IOMMU_DOMAIN_DMA) {
3213                ret = iommu_dma_init_fq(prev_dom);
3214                if (!ret)
3215                        prev_dom->type = IOMMU_DOMAIN_DMA_FQ;
3216                goto out;
3217        }
3218
3219        /* Sets group->default_domain to the newly allocated domain */
3220        ret = iommu_group_alloc_default_domain(dev->bus, group, type);
3221        if (ret)
3222                goto out;
3223
3224        ret = iommu_create_device_direct_mappings(group, dev);
3225        if (ret)
3226                goto free_new_domain;
3227
3228        ret = __iommu_attach_device(group->default_domain, dev);
3229        if (ret)
3230                goto free_new_domain;
3231
3232        group->domain = group->default_domain;
3233
3234        /*
3235         * Release the mutex here because the ops->probe_finalize() callback of
3236         * some vendor IOMMU drivers calls arm_iommu_attach_device(), which
3237         * in turn might call back into IOMMU core code, where it tries to take
3238         * group->mutex, resulting in a deadlock.
3239         */
3240        mutex_unlock(&group->mutex);
3241
3242        /* Make sure dma_ops is appropriately set */
3243        iommu_group_do_probe_finalize(dev, group->default_domain);
3244        iommu_domain_free(prev_dom);
3245        return 0;
3246
3247free_new_domain:
3248        iommu_domain_free(group->default_domain);
3249        group->default_domain = prev_dom;
3250        group->domain = prev_dom;
3251
3252out:
3253        mutex_unlock(&group->mutex);
3254
3255        return ret;
3256}
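
/*
 * For reference only: a summary (not new behaviour) of the call flow when the
 * default domain type is changed from user space via the sysfs attribute
 * handled below.
 *
 * write to /sys/kernel/iommu_groups/<grp_id>/type
 *  -> iommu_group_store_type()
 *      device_lock(dev);
 *       -> iommu_change_dev_def_domain()
 *           mutex_lock(&group->mutex);
 *           iommu_group_alloc_default_domain()    (new group->default_domain)
 *           iommu_create_device_direct_mappings()
 *           __iommu_attach_device()
 *           mutex_unlock(&group->mutex);
 *           iommu_group_do_probe_finalize()
 *           iommu_domain_free(prev_dom)
 *      device_unlock(dev);
 *
 * The only exception is a DMA -> DMA-FQ transition, which keeps the existing
 * default domain and merely calls iommu_dma_init_fq() on it.
 */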
3257
3258/*
3259 * Changing the default domain through sysfs requires the user to unbind the
3260 * drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
3261 * transition. Return failure if this isn't met.
3262 *
3263 * We need to consider the race between this and the device release path.
3264 * device_lock(dev) is used here to guarantee that the device release path
3265 * will not be entered at the same time.
3266 */
3267static ssize_t iommu_group_store_type(struct iommu_group *group,
3268                                      const char *buf, size_t count)
3269{
3270        struct group_device *grp_dev;
3271        struct device *dev;
3272        int ret, req_type;
3273
3274        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
3275                return -EACCES;
3276
3277        if (WARN_ON(!group))
3278                return -EINVAL;
3279
3280        if (sysfs_streq(buf, "identity"))
3281                req_type = IOMMU_DOMAIN_IDENTITY;
3282        else if (sysfs_streq(buf, "DMA"))
3283                req_type = IOMMU_DOMAIN_DMA;
3284        else if (sysfs_streq(buf, "DMA-FQ"))
3285                req_type = IOMMU_DOMAIN_DMA_FQ;
3286        else if (sysfs_streq(buf, "auto"))
3287                req_type = 0;
3288        else
3289                return -EINVAL;
3290
3291        /*
3292         * Take and release the group mutex here, before the device lock, to
3293         * 1. Make sure that the iommu group has only one device (this is a
3294         *    prerequisite for step 2)
3295         * 2. Get the struct device *dev that is needed to take the device lock
3296         */
3297        mutex_lock(&group->mutex);
3298        if (iommu_group_device_count(group) != 1) {
3299                mutex_unlock(&group->mutex);
3300                pr_err_ratelimited("Cannot change default domain: Group has more than one device\n");
3301                return -EINVAL;
3302        }
3303
3304        /* Since group has only one device */
3305        grp_dev = list_first_entry(&group->devices, struct group_device, list);
3306        dev = grp_dev->dev;
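        /* Pin the device so it remains valid after group->mutex is dropped below */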
3307        get_device(dev);
3308
3309        /*
3310         * Don't hold the group mutex here, because taking the group mutex first
3311         * and then the device lock could deadlock. Assume two threads T1 and T2:
3312         * T1 is trying to change the default domain of an iommu group while T2
3313         * is trying to hot unplug a device, or release [1] a VF of a PCIe device,
3314         * in the same iommu group. If T1 takes the group mutex and, before it
3315         * can take the device lock, T2 takes the device lock and has yet to take
3316         * the group mutex, then both threads end up waiting for the other to
3317         * release its lock. Hence the lock order suggested below:
3318         *
3319         * device_lock(dev);
3320         *      mutex_lock(&group->mutex);
3321         *              iommu_change_dev_def_domain();
3322         *      mutex_unlock(&group->mutex);
3323         * device_unlock(dev);
3324         *
3325         * [1] Typical device release path
3326         * device_lock() from device/driver core code
3327         *  -> bus_notifier()
3328         *   -> iommu_bus_notifier()
3329         *    -> iommu_release_device()
3330         *     -> ops->release_device() vendor driver calls back iommu core code
3331         *      -> mutex_lock() from iommu core code
3332         */
3333        mutex_unlock(&group->mutex);
3334
3335        /* Check if the device in the group still has a driver bound to it */
3336        device_lock(dev);
3337        if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
3338            group->default_domain->type == IOMMU_DOMAIN_DMA)) {
3339                pr_err_ratelimited("Device is still bound to driver\n");
3340                ret = -EBUSY;
3341                goto out;
3342        }
3343
3344        ret = iommu_change_dev_def_domain(group, dev, req_type);
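        /* A successful sysfs ->store() must return the number of bytes consumed */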
3345        ret = ret ?: count;
3346
3347out:
3348        device_unlock(dev);
3349        put_device(dev);
3350
3351        return ret;
3352}
3353
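/*
 * Illustration only (not kernel code): a minimal user-space sketch of driving
 * the "type" attribute handled by iommu_group_store_type() above. The group
 * number (42) and target type ("DMA-FQ") are hypothetical; the writer needs
 * both CAP_SYS_ADMIN and CAP_SYS_RAWIO, and, except for a DMA -> DMA-FQ
 * transition, the drivers of the devices in the group must be unbound first.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int set_group_type(void)
 *	{
 *		const char *path = "/sys/kernel/iommu_groups/42/type";
 *		const char *type = "DMA-FQ";
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, type, strlen(type)) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */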