linux/drivers/iommu/iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

static unsigned int iommu_def_domain_type __read_mostly;
static bool iommu_dma_strict __read_mostly = true;
static u32 iommu_cmd_line __read_mostly;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]                     = "direct",
        [IOMMU_RESV_DIRECT_RELAXABLE]           = "direct-relaxable",
        [IOMMU_RESV_RESERVED]                   = "reserved",
        [IOMMU_RESV_MSI]                        = "msi",
        [IOMMU_RESV_SW_MSI]                     = "msi",
};

#define IOMMU_CMD_LINE_DMA_API          BIT(0)

static void iommu_set_cmd_line_dma_api(void)
{
        iommu_cmd_line |= IOMMU_CMD_LINE_DMA_API;
}

static bool iommu_cmd_line_dma_api(void)
{
        return !!(iommu_cmd_line & IOMMU_CMD_LINE_DMA_API);
}

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

/*
 * Use a function instead of an array here because the domain-type is a
 * bit-field, so an array would waste memory.
 */
static const char *iommu_domain_type_str(unsigned int t)
{
        switch (t) {
        case IOMMU_DOMAIN_BLOCKED:
                return "Blocked";
        case IOMMU_DOMAIN_IDENTITY:
                return "Passthrough";
        case IOMMU_DOMAIN_UNMANAGED:
                return "Unmanaged";
        case IOMMU_DOMAIN_DMA:
                return "Translated";
        default:
                return "Unknown";
        }
}

static int __init iommu_subsys_init(void)
{
        bool cmd_line = iommu_cmd_line_dma_api();

        if (!cmd_line) {
                if (IS_ENABLED(CONFIG_IOMMU_DEFAULT_PASSTHROUGH))
                        iommu_set_default_passthrough(false);
                else
                        iommu_set_default_translated(false);

                if (iommu_default_passthrough() && mem_encrypt_active()) {
                        pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
                        iommu_set_default_translated(false);
                }
        }

        pr_info("Default domain type: %s %s\n",
                iommu_domain_type_str(iommu_def_domain_type),
                cmd_line ? "(set via kernel command line)" : "");

        return 0;
}
subsys_initcall(iommu_subsys_init);

int iommu_device_register(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);
        return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}

static struct iommu_param *iommu_get_dev_param(struct device *dev)
{
        struct iommu_param *param = dev->iommu_param;

        if (param)
                return param;

        param = kzalloc(sizeof(*param), GFP_KERNEL);
        if (!param)
                return NULL;

        mutex_init(&param->lock);
        dev->iommu_param = param;
        return param;
}

static void iommu_free_dev_param(struct device *dev)
{
        kfree(dev->iommu_param);
        dev->iommu_param = NULL;
}

int iommu_probe_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        int ret;

        WARN_ON(dev->iommu_group);
        if (!ops)
                return -EINVAL;

        if (!iommu_get_dev_param(dev))
                return -ENOMEM;

        ret = ops->add_device(dev);
        if (ret)
                iommu_free_dev_param(dev);

        return ret;
}

void iommu_release_device(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (dev->iommu_group)
                ops->remove_device(dev);

        iommu_free_dev_param(dev);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        if (pt)
                iommu_set_default_passthrough(true);
        else
                iommu_set_default_translated(true);

        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);

static int __init iommu_dma_setup(char *str)
{
        return kstrtobool(str, &iommu_dma_strict);
}
early_param("iommu.strict", iommu_dma_setup);
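
/*
 * Illustrative command-line usage for the two early_params above (a sketch;
 * both parameters are documented in
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	iommu.passthrough=1	make IOMMU_DOMAIN_IDENTITY the default domain
 *				type, so DMA bypasses translation
 *	iommu.strict=0		allow lazy IOTLB invalidation for DMA domains,
 *				trading isolation for unmap throughput
 */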

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * Elements are sorted by start address and overlapping segments
 * of the same type are merged.
 */
int iommu_insert_resv_region(struct iommu_resv_region *new,
                             struct list_head *regions)
{
        struct iommu_resv_region *iter, *tmp, *nr, *top;
        LIST_HEAD(stack);

        nr = iommu_alloc_resv_region(new->start, new->length,
                                     new->prot, new->type);
        if (!nr)
                return -ENOMEM;

        /* First add the new element based on start address sorting */
        list_for_each_entry(iter, regions, list) {
                if (nr->start < iter->start ||
                    (nr->start == iter->start && nr->type <= iter->type))
                        break;
        }
        list_add_tail(&nr->list, &iter->list);

        /* Merge overlapping segments of type nr->type in @regions, if any */
        list_for_each_entry_safe(iter, tmp, regions, list) {
                phys_addr_t top_end, iter_end = iter->start + iter->length - 1;

                /* no merge needed on elements of different types than @new */
                if (iter->type != new->type) {
                        list_move_tail(&iter->list, &stack);
                        continue;
                }

                /* look for the last stack element of same type as @iter */
                list_for_each_entry_reverse(top, &stack, list)
                        if (top->type == iter->type)
                                goto check_overlap;

                list_move_tail(&iter->list, &stack);
                continue;

check_overlap:
                top_end = top->start + top->length - 1;

                if (iter->start > top_end + 1) {
                        list_move_tail(&iter->list, &stack);
                } else {
                        top->length = max(top_end, iter_end) - top->start + 1;
                        list_del(&iter->list);
                        kfree(iter);
                }
        }
        list_splice(&stack, regions);
        return 0;
}
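
/*
 * Illustrative behaviour of iommu_insert_resv_region() (a sketch; @prot is
 * whatever protection flags the caller uses): inserting [0x1000, 0x1fff] and
 * then the overlapping [0x1800, 0x2fff], both IOMMU_RESV_DIRECT, into an
 * empty list leaves a single merged entry [0x1000, 0x2fff], while a region
 * of another type at the same addresses would stay a separate entry:
 *
 *	LIST_HEAD(regions);
 *	struct iommu_resv_region *a, *b;
 *
 *	a = iommu_alloc_resv_region(0x1000, 0x1000, prot, IOMMU_RESV_DIRECT);
 *	b = iommu_alloc_resv_region(0x1800, 0x1800, prot, IOMMU_RESV_DIRECT);
 *	iommu_insert_resv_region(a, &regions);	-- list: [0x1000, 0x1fff]
 *	iommu_insert_resv_region(b, &regions);	-- list: [0x1000, 0x2fff]
 *
 * Note that the function duplicates @new internally, so @a and @b above
 * remain owned (and must be freed) by the caller.
 */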

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
                                 struct list_head *group_resv_regions)
{
        struct iommu_resv_region *entry;
        int ret = 0;

        list_for_each_entry(entry, dev_resv_regions, list) {
                ret = iommu_insert_resv_region(entry, group_resv_regions);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
{
        struct group_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                struct list_head dev_resv_regions;

                INIT_LIST_HEAD(&dev_resv_regions);
                iommu_get_resv_regions(device->dev, &dev_resv_regions);
                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
                iommu_put_resv_regions(device->dev, &dev_resv_regions);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
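
/*
 * Illustrative caller pattern for iommu_get_group_resv_regions() (a sketch,
 * mirroring the sysfs consumer below): the helper fills a caller-provided
 * list with newly allocated entries, so the caller frees them when done:
 *
 *	LIST_HEAD(resv);
 *	struct iommu_resv_region *r, *next;
 *
 *	iommu_get_group_resv_regions(group, &resv);
 *	list_for_each_entry_safe(r, next, &resv, list) {
 *		...		-- inspect r->start, r->length, r->type
 *		kfree(r);
 *	}
 */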

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
                                             char *buf)
{
        struct iommu_resv_region *region, *next;
        struct list_head group_resv_regions;
        char *str = buf;

        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);

        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
                               (long long int)region->start,
                               (long long int)(region->start +
                                                region->length - 1),
                               iommu_group_resv_type_string[region->type]);
                kfree(region);
        }

        return (str - buf);
}

static ssize_t iommu_group_show_type(struct iommu_group *group,
                                     char *buf)
{
        char *type = "unknown\n";

        if (group->default_domain) {
                switch (group->default_domain->type) {
                case IOMMU_DOMAIN_BLOCKED:
                        type = "blocked\n";
                        break;
                case IOMMU_DOMAIN_IDENTITY:
                        type = "identity\n";
                        break;
                case IOMMU_DOMAIN_UNMANAGED:
                        type = "unmanaged\n";
                        break;
                case IOMMU_DOMAIN_DMA:
                        type = "DMA\n";
                        break;
                }
        }
        strcpy(buf, type);

        return strlen(type);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
                        iommu_group_show_resv_regions, NULL);

static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        ret = iommu_group_create_file(group,
                                      &iommu_group_attr_reserved_regions);
        if (ret)
                return ERR_PTR(ret);

        ret = iommu_group_create_file(group, &iommu_group_attr_type);
        if (ret)
                return ERR_PTR(ret);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
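
/*
 * Illustrative driver-side flow for iommu_group_alloc() (a sketch with error
 * handling elided; the group name is hypothetical):
 *
 *	struct iommu_group *group = iommu_group_alloc();
 *
 *	if (IS_ERR(group))
 *		return PTR_ERR(group);
 *	iommu_group_set_name(group, "my-iommu-unit");
 *	iommu_group_add_device(group, dev);
 *	iommu_group_put(group);		-- drop the allocation reference;
 *					   the device added above now keeps
 *					   the group alive
 */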

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
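
/*
 * Illustrative pairing of the two iommu_data helpers above (a sketch;
 * struct my_group_state and my_state_release() are hypothetical): the
 * release callback runs from iommu_group_release() when the group's last
 * reference is dropped, so the driver never frees the data explicitly:
 *
 *	struct my_group_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *	iommu_group_set_iommudata(group, st, my_state_release);
 *	...
 *	st = iommu_group_get_iommudata(group);
 */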

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_resv_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;

                if (domain->ops->apply_resv_region)
                        domain->ops->apply_resv_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                if (entry->type != IOMMU_RESV_DIRECT &&
                    entry->type != IOMMU_RESV_DIRECT_RELAXABLE)
                        continue;

                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (phys_addr)
                                continue;

                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
                        if (ret)
                                goto out;
                }
        }

        iommu_flush_tlb_all(domain);

out:
        iommu_put_resv_regions(dev, &mappings);

        return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        iommu_group_create_direct_mappings(group, dev);

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        dev_info(dev, "Adding to iommu group %d\n", group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
        sysfs_remove_link(group->devices_kobj, device->name);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        dev_err(dev, "Failed to add to iommu group %d: %d\n", group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        dev_info(dev, "Removing from iommu group %d\n", group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
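
/*
 * Illustrative callback usage for iommu_group_for_each_dev() (a sketch;
 * count_device() is hypothetical): because group->mutex is held across
 * callbacks, @fn must not re-enter iommu_group_add_device() or
 * iommu_group_remove_device():
 *
 *	static int count_device(struct device *dev, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	-- returning nonzero stops the iteration
 *	}
 *
 *	int count = 0;
 *
 *	iommu_group_for_each_dev(group, &count, count_device);
 */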

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
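
/*
 * Illustrative notifier usage (a sketch; VFIO is the main in-tree consumer):
 * a listener keys off the IOMMU_GROUP_NOTIFY_* actions that
 * iommu_bus_notifier() below republishes to the group:
 *
 *	static int my_group_notifier(struct notifier_block *nb,
 *				     unsigned long action, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (action == IOMMU_GROUP_NOTIFY_BOUND_DRIVER)
 *			...	-- react to a driver binding to dev
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_group_notifier,
 *	};
 *
 *	iommu_group_register_notifier(group, &my_nb);
 *	...
 *	iommu_group_unregister_notifier(group, &my_nb);
 */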

/**
 * iommu_register_device_fault_handler() - Register a device fault handler
 * @dev: the device
 * @handler: the fault handler
 * @data: private data passed as argument to the handler
 *
 * When an IOMMU fault event is received, this handler gets called with the
 * fault event and data as arguments. The handler should return 0 on success.
 * If the fault is recoverable (IOMMU_FAULT_PAGE_REQ), the consumer should also
 * complete the fault by calling iommu_page_response() with one of the following
 * response codes:
 * - IOMMU_PAGE_RESP_SUCCESS: retry the translation
 * - IOMMU_PAGE_RESP_INVALID: terminate the fault
 * - IOMMU_PAGE_RESP_FAILURE: terminate the fault and stop reporting
 *   page faults if possible.
 *
 * Return 0 if the fault handler was installed successfully, or an error.
 */
int iommu_register_device_fault_handler(struct device *dev,
                                        iommu_dev_fault_handler_t handler,
                                        void *data)
{
        struct iommu_param *param = dev->iommu_param;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);
        /* Only allow one fault handler registered for each device */
        if (param->fault_param) {
                ret = -EBUSY;
                goto done_unlock;
        }

        get_device(dev);
        param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
        if (!param->fault_param) {
                put_device(dev);
                ret = -ENOMEM;
                goto done_unlock;
        }
        param->fault_param->handler = handler;
        param->fault_param->data = data;
        mutex_init(&param->fault_param->lock);
        INIT_LIST_HEAD(&param->fault_param->faults);

done_unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
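
/*
 * Illustrative recoverable-fault flow (a sketch; my_fault_handler() is
 * hypothetical): the handler consumes the fault and, for page requests,
 * completes it with iommu_page_response() once the request is serviced:
 *
 *	static int my_fault_handler(struct iommu_fault *fault, void *data)
 *	{
 *		struct device *dev = data;
 *
 *		if (fault->type == IOMMU_FAULT_PAGE_REQ) {
 *			struct iommu_page_response resp = {
 *				.version	= IOMMU_PAGE_RESP_VERSION_1,
 *				.grpid		= fault->prm.grpid,
 *				.pasid		= fault->prm.pasid,
 *				.code		= IOMMU_PAGE_RESP_SUCCESS,
 *			};
 *
 *			...	-- service the page request, then:
 *			return iommu_page_response(dev, &resp);
 *		}
 *		return 0;
 *	}
 *
 *	iommu_register_device_fault_handler(dev, my_fault_handler, dev);
 */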

/**
 * iommu_unregister_device_fault_handler() - Unregister the device fault handler
 * @dev: the device
 *
 * Remove the device fault handler installed with
 * iommu_register_device_fault_handler().
 *
 * Return 0 on success, or an error.
 */
int iommu_unregister_device_fault_handler(struct device *dev)
{
        struct iommu_param *param = dev->iommu_param;
        int ret = 0;

        if (!param)
                return -EINVAL;

        mutex_lock(&param->lock);

        if (!param->fault_param)
                goto unlock;

        /* we cannot unregister handler if there are pending faults */
        if (!list_empty(&param->fault_param->faults)) {
                ret = -EBUSY;
                goto unlock;
        }

        kfree(param->fault_param);
        param->fault_param = NULL;
        put_device(dev);
unlock:
        mutex_unlock(&param->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);

/**
 * iommu_report_device_fault() - Report fault event to device driver
 * @dev: the device
 * @evt: fault event data
 *
 * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
 * handler. When this function fails and the fault is recoverable, it is the
 * caller's responsibility to complete the fault.
 *
 * Return 0 on success, or an error.
 */
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
        struct iommu_param *param = dev->iommu_param;
        struct iommu_fault_event *evt_pending = NULL;
        struct iommu_fault_param *fparam;
        int ret = 0;

        if (!param || !evt)
                return -EINVAL;

        /* we only report device fault if there is a handler registered */
        mutex_lock(&param->lock);
        fparam = param->fault_param;
        if (!fparam || !fparam->handler) {
                ret = -EINVAL;
                goto done_unlock;
        }

        if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
            (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
                evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
                                      GFP_KERNEL);
                if (!evt_pending) {
                        ret = -ENOMEM;
                        goto done_unlock;
                }
                mutex_lock(&fparam->lock);
                list_add_tail(&evt_pending->list, &fparam->faults);
                mutex_unlock(&fparam->lock);
        }

        ret = fparam->handler(&evt->fault, fparam->data);
        if (ret && evt_pending) {
                mutex_lock(&fparam->lock);
                list_del(&evt_pending->list);
                mutex_unlock(&fparam->lock);
                kfree(evt_pending);
        }
done_unlock:
        mutex_unlock(&param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_report_device_fault);

int iommu_page_response(struct device *dev,
                        struct iommu_page_response *msg)
{
        bool pasid_valid;
        int ret = -EINVAL;
        struct iommu_fault_event *evt;
        struct iommu_fault_page_request *prm;
        struct iommu_param *param = dev->iommu_param;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (!domain || !domain->ops->page_response)
                return -ENODEV;

        if (!param || !param->fault_param)
                return -EINVAL;

        if (msg->version != IOMMU_PAGE_RESP_VERSION_1 ||
            msg->flags & ~IOMMU_PAGE_RESP_PASID_VALID)
                return -EINVAL;

        /* Only send response if there is a fault report pending */
        mutex_lock(&param->fault_param->lock);
        if (list_empty(&param->fault_param->faults)) {
                dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
                goto done_unlock;
        }
        /*
         * Check if we have a matching page request pending to respond,
         * otherwise return -EINVAL
         */
        list_for_each_entry(evt, &param->fault_param->faults, list) {
                prm = &evt->fault.prm;
                pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

                if ((pasid_valid && prm->pasid != msg->pasid) ||
                    prm->grpid != msg->grpid)
                        continue;

                /* Sanitize the reply */
                msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0;

                ret = domain->ops->page_response(dev, evt, msg);
                list_del(&evt->list);
                kfree(evt);
                break;
        }

done_unlock:
        mutex_unlock(&param->fault_param->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_page_response);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as it passes through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}
/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        return iommu_group_alloc();
}
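
/*
 * Illustrative iommu_ops wiring (a sketch; my_iommu_ops is hypothetical):
 * drivers normally plug one of the generic helpers into their ops rather
 * than open-coding group construction, e.g. pci_device_group() above for
 * PCI devices, generic_device_group() for platform devices, or
 * fsl_mc_device_group() below for the fsl-mc bus:
 *
 *	static struct iommu_ops my_iommu_ops = {
 *		...
 *		.device_group	= pci_device_group,
 *		...
 *	};
 */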

/* Get the IOMMU group for device on fsl-mc bus */
struct iommu_group *fsl_mc_device_group(struct device *dev)
{
        struct device *cont_dev = fsl_mc_cont_dev(dev);
        struct iommu_group *group;

        group = iommu_group_get(cont_dev);
        if (!group)
                group = iommu_group_alloc();
        return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        if (!ops)
                return ERR_PTR(-EINVAL);

        group = ops->device_group(dev);
        if (WARN_ON_ONCE(group == NULL))
                return ERR_PTR(-EINVAL);

        if (IS_ERR(group))
                return group;

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
         */
        if (!group->default_domain) {
                struct iommu_domain *dom;

                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
                        if (dom) {
                                dev_warn(dev,
                                         "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
                                         iommu_def_domain_type);
                        }
                }

                group->default_domain = dom;
                if (!group->domain)
                        group->domain = dom;

                if (dom && !iommu_dma_strict) {
                        int attr = 1;

                        iommu_domain_set_attr(dom,
                                              DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
                                              &attr);
                }
        }

        ret = iommu_group_add_device(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
        int ret = iommu_probe_device(dev);

        /*
         * We ignore -ENODEV errors for now, as they just mean that the
         * device is not translated by an IOMMU. We still care about
         * other errors and fail to initialize when they happen.
         */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        iommu_release_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        unsigned long group_action = 0;
        struct device *dev = data;
        struct iommu_group *group;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                int ret;

                ret = iommu_probe_device(dev);
                return (ret) ? NOTIFY_DONE : NOTIFY_OK;
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                iommu_release_device(dev);
                return NOTIFY_OK;
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;
        struct notifier_block *nb;

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
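
/*
 * Illustrative registration (a sketch): an IOMMU driver calls this once per
 * bus type from its own init path, typically guarded by iommu_present():
 *
 *	if (!iommu_present(&pci_bus_type))
 *		bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *
 * where my_iommu_ops is the hypothetical ops structure sketched earlier.
 */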
1553
1554bool iommu_present(struct bus_type *bus)
1555{
1556        return bus->iommu_ops != NULL;
1557}
1558EXPORT_SYMBOL_GPL(iommu_present);
1559
1560bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1561{
1562        if (!bus->iommu_ops || !bus->iommu_ops->capable)
1563                return false;
1564
1565        return bus->iommu_ops->capable(cap);
1566}
1567EXPORT_SYMBOL_GPL(iommu_capable);
1568
1569/**
1570 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1571 * @domain: iommu domain
1572 * @handler: fault handler
1573 * @token: user data, will be passed back to the fault handler
1574 *
1575 * This function should be used by IOMMU users which want to be notified
1576 * whenever an IOMMU fault happens.
1577 *
1578 * The fault handler itself should return 0 on success, and an appropriate
1579 * error code otherwise.
1580 */
1581void iommu_set_fault_handler(struct iommu_domain *domain,
1582                                        iommu_fault_handler_t handler,
1583                                        void *token)
1584{
1585        BUG_ON(!domain);
1586
1587        domain->handler = handler;
1588        domain->handler_token = token;
1589}
1590EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
1591
1592static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1593                                                 unsigned type)
1594{
1595        struct iommu_domain *domain;
1596
1597        if (bus == NULL || bus->iommu_ops == NULL)
1598                return NULL;
1599
1600        domain = bus->iommu_ops->domain_alloc(type);
1601        if (!domain)
1602                return NULL;
1603
1604        domain->ops  = bus->iommu_ops;
1605        domain->type = type;
1606        /* Assume all sizes by default; the driver may override this later */
1607        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1608
1609        return domain;
1610}
1611
1612struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1613{
1614        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1615}
1616EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1617
1618void iommu_domain_free(struct iommu_domain *domain)
1619{
1620        domain->ops->domain_free(domain);
1621}
1622EXPORT_SYMBOL_GPL(iommu_domain_free);
1623
1624static int __iommu_attach_device(struct iommu_domain *domain,
1625                                 struct device *dev)
1626{
1627        int ret;
1628        if ((domain->ops->is_attach_deferred != NULL) &&
1629            domain->ops->is_attach_deferred(domain, dev))
1630                return 0;
1631
1632        if (unlikely(domain->ops->attach_dev == NULL))
1633                return -ENODEV;
1634
1635        ret = domain->ops->attach_dev(domain, dev);
1636        if (!ret)
1637                trace_attach_device_to_domain(dev);
1638        return ret;
1639}
1640
1641int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1642{
1643        struct iommu_group *group;
1644        int ret;
1645
1646        group = iommu_group_get(dev);
1647        if (!group)
1648                return -ENODEV;
1649
1650        /*
1651         * Lock the group to make sure the device-count doesn't
1652         * change while we are attaching
1653         */
1654        mutex_lock(&group->mutex);
1655        ret = -EINVAL;
1656        if (iommu_group_device_count(group) != 1)
1657                goto out_unlock;
1658
1659        ret = __iommu_attach_group(domain, group);
1660
1661out_unlock:
1662        mutex_unlock(&group->mutex);
1663        iommu_group_put(group);
1664
1665        return ret;
1666}
1667EXPORT_SYMBOL_GPL(iommu_attach_device);
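
/*
 * Usage sketch (illustrative; "example_private_domain" and its IOVA/size
 * values are hypothetical): the typical lifecycle for a driver that wants
 * to manage its own translations instead of using the default domain.
 */
static int example_private_domain(struct device *dev, phys_addr_t paddr)
{
        struct iommu_domain *domain;
        int ret;

        domain = iommu_domain_alloc(dev->bus);
        if (!domain)
                return -ENOMEM;

        ret = iommu_attach_device(domain, dev);
        if (ret)
                goto out_free;

        /* Map one page of @paddr read/write at IOVA 0x10000000 */
        ret = iommu_map(domain, 0x10000000, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
                goto out_detach;

        /* ... the device may now DMA to IOVA 0x10000000 ... */

        iommu_unmap(domain, 0x10000000, PAGE_SIZE);
out_detach:
        iommu_detach_device(domain, dev);
out_free:
        iommu_domain_free(domain);
        return ret;
}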
1668
1669int iommu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
1670                           struct iommu_cache_invalidate_info *inv_info)
1671{
1672        if (unlikely(!domain->ops->cache_invalidate))
1673                return -ENODEV;
1674
1675        return domain->ops->cache_invalidate(domain, dev, inv_info);
1676}
1677EXPORT_SYMBOL_GPL(iommu_cache_invalidate);
1678
1679int iommu_sva_bind_gpasid(struct iommu_domain *domain,
1680                           struct device *dev, struct iommu_gpasid_bind_data *data)
1681{
1682        if (unlikely(!domain->ops->sva_bind_gpasid))
1683                return -ENODEV;
1684
1685        return domain->ops->sva_bind_gpasid(domain, dev, data);
1686}
1687EXPORT_SYMBOL_GPL(iommu_sva_bind_gpasid);
1688
1689int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
1690                             ioasid_t pasid)
1691{
1692        if (unlikely(!domain->ops->sva_unbind_gpasid))
1693                return -ENODEV;
1694
1695        return domain->ops->sva_unbind_gpasid(dev, pasid);
1696}
1697EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
1698
1699static void __iommu_detach_device(struct iommu_domain *domain,
1700                                  struct device *dev)
1701{
1702        if ((domain->ops->is_attach_deferred != NULL) &&
1703            domain->ops->is_attach_deferred(domain, dev))
1704                return;
1705
1706        if (unlikely(domain->ops->detach_dev == NULL))
1707                return;
1708
1709        domain->ops->detach_dev(domain, dev);
1710        trace_detach_device_from_domain(dev);
1711}
1712
1713void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1714{
1715        struct iommu_group *group;
1716
1717        group = iommu_group_get(dev);
1718        if (!group)
1719                return;
1720
1721        mutex_lock(&group->mutex);
1722        if (iommu_group_device_count(group) != 1) {
1723                WARN_ON(1);
1724                goto out_unlock;
1725        }
1726
1727        __iommu_detach_group(domain, group);
1728
1729out_unlock:
1730        mutex_unlock(&group->mutex);
1731        iommu_group_put(group);
1732}
1733EXPORT_SYMBOL_GPL(iommu_detach_device);
1734
1735struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1736{
1737        struct iommu_domain *domain;
1738        struct iommu_group *group;
1739
1740        group = iommu_group_get(dev);
1741        if (!group)
1742                return NULL;
1743
1744        domain = group->domain;
1745
1746        iommu_group_put(group);
1747
1748        return domain;
1749}
1750EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1751
1752/*
1753 * Fast path for IOMMU_DOMAIN_DMA implementations which can themselves
1754 * guarantee that the group and its default domain are valid and correct.
1755 */
1756struct iommu_domain *iommu_get_dma_domain(struct device *dev)
1757{
1758        return dev->iommu_group->default_domain;
1759}
1760
1761/*
1762 * IOMMU groups are really the natural working unit of the IOMMU, but
1763 * the IOMMU API works on domains and devices.  Bridge that gap by
1764 * iterating over the devices in a group.  Ideally we'd have a single
1765 * device which represents the requestor ID of the group, but we also
1766 * allow IOMMU drivers to create policy defined minimum sets, where
1767 * the physical hardware may be able to distinguish members, but we
1768 * wish to group them at a higher level (e.g. untrusted multi-function
1769 * PCI devices).  Thus we attach each device.
1770 */
1771static int iommu_group_do_attach_device(struct device *dev, void *data)
1772{
1773        struct iommu_domain *domain = data;
1774
1775        return __iommu_attach_device(domain, dev);
1776}
1777
1778static int __iommu_attach_group(struct iommu_domain *domain,
1779                                struct iommu_group *group)
1780{
1781        int ret;
1782
1783        if (group->default_domain && group->domain != group->default_domain)
1784                return -EBUSY;
1785
1786        ret = __iommu_group_for_each_dev(group, domain,
1787                                         iommu_group_do_attach_device);
1788        if (ret == 0)
1789                group->domain = domain;
1790
1791        return ret;
1792}
1793
1794int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1795{
1796        int ret;
1797
1798        mutex_lock(&group->mutex);
1799        ret = __iommu_attach_group(domain, group);
1800        mutex_unlock(&group->mutex);
1801
1802        return ret;
1803}
1804EXPORT_SYMBOL_GPL(iommu_attach_group);
1805
1806static int iommu_group_do_detach_device(struct device *dev, void *data)
1807{
1808        struct iommu_domain *domain = data;
1809
1810        __iommu_detach_device(domain, dev);
1811
1812        return 0;
1813}
1814
1815static void __iommu_detach_group(struct iommu_domain *domain,
1816                                 struct iommu_group *group)
1817{
1818        int ret;
1819
1820        if (!group->default_domain) {
1821                __iommu_group_for_each_dev(group, domain,
1822                                           iommu_group_do_detach_device);
1823                group->domain = NULL;
1824                return;
1825        }
1826
1827        if (group->domain == group->default_domain)
1828                return;
1829
1830        /* Detach by re-attaching to the default domain */
1831        ret = __iommu_group_for_each_dev(group, group->default_domain,
1832                                         iommu_group_do_attach_device);
1833        if (ret != 0)
1834                WARN_ON(1);
1835        else
1836                group->domain = group->default_domain;
1837}
1838
1839void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1840{
1841        mutex_lock(&group->mutex);
1842        __iommu_detach_group(domain, group);
1843        mutex_unlock(&group->mutex);
1844}
1845EXPORT_SYMBOL_GPL(iommu_detach_group);
1846
1847phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1848{
1849        if (unlikely(domain->ops->iova_to_phys == NULL))
1850                return 0;
1851
1852        return domain->ops->iova_to_phys(domain, iova);
1853}
1854EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1855
1856static size_t iommu_pgsize(struct iommu_domain *domain,
1857                           unsigned long addr_merge, size_t size)
1858{
1859        unsigned int pgsize_idx;
1860        size_t pgsize;
1861
1862        /* Max page size that still fits into 'size' */
1863        pgsize_idx = __fls(size);
1864
1865        /* Need to consider alignment requirements? */
1866        if (likely(addr_merge)) {
1867                /* Max page size allowed by address */
1868                unsigned int align_pgsize_idx = __ffs(addr_merge);
1869                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1870        }
1871
1872        /* build a mask of acceptable page sizes */
1873        pgsize = (1UL << (pgsize_idx + 1)) - 1;
1874
1875        /* throw away page sizes not supported by the hardware */
1876        pgsize &= domain->pgsize_bitmap;
1877
1878        /* make sure we're still sane */
1879        BUG_ON(!pgsize);
1880
1881        /* pick the biggest page */
1882        pgsize_idx = __fls(pgsize);
1883        pgsize = 1UL << pgsize_idx;
1884
1885        return pgsize;
1886}
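
/*
 * Worked example for iommu_pgsize() (illustrative numbers): with
 * addr_merge = iova | paddr = 0x10200000, size = 0x210000 and
 * pgsize_bitmap = SZ_4K | SZ_2M, __fls(size) and __ffs(addr_merge) both
 * yield bit 21, so the mask allows any page size up to 2MiB. Intersecting
 * with the bitmap leaves 4KiB and 2MiB, and the final __fls() picks the
 * 2MiB page; the remaining 0x10000 bytes are covered by later iterations
 * of the map/unmap loops.
 */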
1887
1888static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
1889                       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
1890{
1891        const struct iommu_ops *ops = domain->ops;
1892        unsigned long orig_iova = iova;
1893        unsigned int min_pagesz;
1894        size_t orig_size = size;
1895        phys_addr_t orig_paddr = paddr;
1896        int ret = 0;
1897
1898        if (unlikely(ops->map == NULL ||
1899                     domain->pgsize_bitmap == 0UL))
1900                return -ENODEV;
1901
1902        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1903                return -EINVAL;
1904
1905        /* find out the minimum page size supported */
1906        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1907
1908        /*
1909         * both the virtual address and the physical one, as well as
1910         * the size of the mapping, must be aligned (at least) to the
1911         * size of the smallest page supported by the hardware
1912         */
1913        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1914                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1915                       iova, &paddr, size, min_pagesz);
1916                return -EINVAL;
1917        }
1918
1919        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1920
1921        while (size) {
1922                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1923
1924                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1925                         iova, &paddr, pgsize);
1926                ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
1927
1928                if (ret)
1929                        break;
1930
1931                iova += pgsize;
1932                paddr += pgsize;
1933                size -= pgsize;
1934        }
1935
1936        if (ops->iotlb_sync_map)
1937                ops->iotlb_sync_map(domain);
1938
1939        /* unroll mapping in case something went wrong */
1940        if (ret)
1941                iommu_unmap(domain, orig_iova, orig_size - size);
1942        else
1943                trace_map(orig_iova, orig_paddr, orig_size);
1944
1945        return ret;
1946}
1947
1948int iommu_map(struct iommu_domain *domain, unsigned long iova,
1949              phys_addr_t paddr, size_t size, int prot)
1950{
1951        might_sleep();
1952        return __iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL);
1953}
1954EXPORT_SYMBOL_GPL(iommu_map);
1955
1956int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
1957              phys_addr_t paddr, size_t size, int prot)
1958{
1959        return __iommu_map(domain, iova, paddr, size, prot, GFP_ATOMIC);
1960}
1961EXPORT_SYMBOL_GPL(iommu_map_atomic);
1962
1963static size_t __iommu_unmap(struct iommu_domain *domain,
1964                            unsigned long iova, size_t size,
1965                            struct iommu_iotlb_gather *iotlb_gather)
1966{
1967        const struct iommu_ops *ops = domain->ops;
1968        size_t unmapped_page, unmapped = 0;
1969        unsigned long orig_iova = iova;
1970        unsigned int min_pagesz;
1971
1972        if (unlikely(ops->unmap == NULL ||
1973                     domain->pgsize_bitmap == 0UL))
1974                return 0;
1975
1976        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1977                return 0;
1978
1979        /* find out the minimum page size supported */
1980        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1981
1982        /*
1983         * The virtual address, as well as the size of the mapping, must be
1984         * aligned (at least) to the size of the smallest page supported
1985         * by the hardware
1986         */
1987        if (!IS_ALIGNED(iova | size, min_pagesz)) {
1988                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1989                       iova, size, min_pagesz);
1990                return 0;
1991        }
1992
1993        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1994
1995        /*
1996         * Keep iterating until we either unmap 'size' bytes (or more)
1997         * or we hit an area that isn't mapped.
1998         */
1999        while (unmapped < size) {
2000                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
2001
2002                unmapped_page = ops->unmap(domain, iova, pgsize, iotlb_gather);
2003                if (!unmapped_page)
2004                        break;
2005
2006                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
2007                         iova, unmapped_page);
2008
2009                iova += unmapped_page;
2010                unmapped += unmapped_page;
2011        }
2012
2013        trace_unmap(orig_iova, size, unmapped);
2014        return unmapped;
2015}
2016
2017size_t iommu_unmap(struct iommu_domain *domain,
2018                   unsigned long iova, size_t size)
2019{
2020        struct iommu_iotlb_gather iotlb_gather;
2021        size_t ret;
2022
2023        iommu_iotlb_gather_init(&iotlb_gather);
2024        ret = __iommu_unmap(domain, iova, size, &iotlb_gather);
2025        iommu_tlb_sync(domain, &iotlb_gather);
2026
2027        return ret;
2028}
2029EXPORT_SYMBOL_GPL(iommu_unmap);
2030
2031size_t iommu_unmap_fast(struct iommu_domain *domain,
2032                        unsigned long iova, size_t size,
2033                        struct iommu_iotlb_gather *iotlb_gather)
2034{
2035        return __iommu_unmap(domain, iova, size, iotlb_gather);
2036}
2037EXPORT_SYMBOL_GPL(iommu_unmap_fast);
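
/*
 * Usage sketch (illustrative; "example_unmap_batch" is hypothetical
 * caller code): batching several unmaps through iommu_unmap_fast() and
 * issuing a single TLB sync at the end, instead of one sync per call as
 * iommu_unmap() does.
 */
static void example_unmap_batch(struct iommu_domain *domain,
                                unsigned long *iovas, size_t *sizes,
                                unsigned int n)
{
        struct iommu_iotlb_gather gather;
        unsigned int i;

        iommu_iotlb_gather_init(&gather);

        for (i = 0; i < n; i++)
                iommu_unmap_fast(domain, iovas[i], sizes[i], &gather);

        /* One invalidation covering everything gathered above */
        iommu_tlb_sync(domain, &gather);
}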
2038
2039static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2040                             struct scatterlist *sg, unsigned int nents, int prot,
2041                             gfp_t gfp)
2042{
2043        size_t len = 0, mapped = 0;
2044        phys_addr_t start;
2045        unsigned int i = 0;
2046        int ret;
2047
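        /*
         * Note: the loop runs one iteration past the last element
         * (i <= nents). On that final pass @sg still points at the last
         * element, so @s_phys can no longer extend the accumulated run
         * and the check below flushes the final contiguous range.
         */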
2048        while (i <= nents) {
2049                phys_addr_t s_phys = sg_phys(sg);
2050
2051                if (len && s_phys != start + len) {
2052                        ret = __iommu_map(domain, iova + mapped, start,
2053                                        len, prot, gfp);
2054
2055                        if (ret)
2056                                goto out_err;
2057
2058                        mapped += len;
2059                        len = 0;
2060                }
2061
2062                if (len) {
2063                        len += sg->length;
2064                } else {
2065                        len = sg->length;
2066                        start = s_phys;
2067                }
2068
2069                if (++i < nents)
2070                        sg = sg_next(sg);
2071        }
2072
2073        return mapped;
2074
2075out_err:
2076        /* undo mappings already done */
2077        iommu_unmap(domain, iova, mapped);
2078
2079        return 0;
2081}
2082
2083size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
2084                    struct scatterlist *sg, unsigned int nents, int prot)
2085{
2086        might_sleep();
2087        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_KERNEL);
2088}
2089EXPORT_SYMBOL_GPL(iommu_map_sg);
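
/*
 * Usage sketch (illustrative; "example_map_sgt" is hypothetical caller
 * code): mapping a previously built sg_table contiguously at a chosen
 * IOVA. iommu_map_sg() returns the number of bytes mapped, or 0 on
 * failure.
 */
static int example_map_sgt(struct iommu_domain *domain, unsigned long iova,
                           struct sg_table *sgt)
{
        size_t mapped;

        mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->orig_nents,
                              IOMMU_READ | IOMMU_WRITE);
        if (!mapped)
                return -ENOMEM;

        return 0;
}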
2090
2091size_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova,
2092                    struct scatterlist *sg, unsigned int nents, int prot)
2093{
2094        return __iommu_map_sg(domain, iova, sg, nents, prot, GFP_ATOMIC);
2095}
2096EXPORT_SYMBOL_GPL(iommu_map_sg_atomic);
2097
2098int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
2099                               phys_addr_t paddr, u64 size, int prot)
2100{
2101        if (unlikely(domain->ops->domain_window_enable == NULL))
2102                return -ENODEV;
2103
2104        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
2105                                                 prot);
2106}
2107EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
2108
2109void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
2110{
2111        if (unlikely(domain->ops->domain_window_disable == NULL))
2112                return;
2113
2114        return domain->ops->domain_window_disable(domain, wnd_nr);
2115}
2116EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
2117
2118/**
2119 * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
2120 * @domain: the iommu domain where the fault has happened
2121 * @dev: the device where the fault has happened
2122 * @iova: the faulting address
2123 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
2124 *
2125 * This function should be called by the low-level IOMMU implementations
2126 * whenever IOMMU faults happen, to allow high-level users, that are
2127 * interested in such events, to know about them.
2128 *
2129 * This event may be useful for several possible use cases:
2130 * - mere logging of the event
2131 * - dynamic TLB/PTE loading
2132 * - restarting the faulting device, if required
2133 *
2134 * Returns 0 on success and an appropriate error code otherwise (if dynamic
2135 * PTE/TLB loading will one day be supported, implementations will be able
2136 * to tell whether it succeeded or not according to this return value).
2137 *
2138 * Specifically, -ENOSYS is returned if a fault handler isn't installed
2139 * (though fault handlers can also return -ENOSYS, in case they want to
2140 * elicit the default behavior of the IOMMU drivers).
2141 */
2142int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
2143                       unsigned long iova, int flags)
2144{
2145        int ret = -ENOSYS;
2146
2147        /*
2148         * if upper layers showed interest and installed a fault handler,
2149         * invoke it.
2150         */
2151        if (domain->handler)
2152                ret = domain->handler(domain, dev, iova, flags,
2153                                                domain->handler_token);
2154
2155        trace_io_page_fault(dev, iova, flags);
2156        return ret;
2157}
2158EXPORT_SYMBOL_GPL(report_iommu_fault);
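
/*
 * Usage sketch (illustrative; "my_iommu", its fields and MY_FAULT_ADDR
 * are hypothetical driver details): a low-level IOMMU driver reporting a
 * fault from its interrupt handler:
 *
 *	static irqreturn_t my_iommu_irq(int irq, void *data)
 *	{
 *		struct my_iommu *iommu = data;
 *		unsigned long iova = readl(iommu->base + MY_FAULT_ADDR);
 *
 *		if (report_iommu_fault(iommu->domain, iommu->dev, iova,
 *				       IOMMU_FAULT_WRITE))
 *			dev_err_ratelimited(iommu->dev,
 *					    "unhandled fault at 0x%lx\n",
 *					    iova);
 *
 *		return IRQ_HANDLED;
 *	}
 */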
2159
2160static int __init iommu_init(void)
2161{
2162        iommu_group_kset = kset_create_and_add("iommu_groups",
2163                                               NULL, kernel_kobj);
2164        BUG_ON(!iommu_group_kset);
2165
2166        iommu_debugfs_setup();
2167
2168        return 0;
2169}
2170core_initcall(iommu_init);
2171
2172int iommu_domain_get_attr(struct iommu_domain *domain,
2173                          enum iommu_attr attr, void *data)
2174{
2175        struct iommu_domain_geometry *geometry;
2176        bool *paging;
2177        int ret = 0;
2178
2179        switch (attr) {
2180        case DOMAIN_ATTR_GEOMETRY:
2181                geometry  = data;
2182                *geometry = domain->geometry;
2183
2184                break;
2185        case DOMAIN_ATTR_PAGING:
2186                paging  = data;
2187                *paging = (domain->pgsize_bitmap != 0UL);
2188                break;
2189        default:
2190                if (!domain->ops->domain_get_attr)
2191                        return -EINVAL;
2192
2193                ret = domain->ops->domain_get_attr(domain, attr, data);
2194        }
2195
2196        return ret;
2197}
2198EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
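
/*
 * Usage sketch (illustrative; "example_query_geometry" is hypothetical
 * caller code): querying the aperture of a domain before carving IOVA
 * space out of it.
 */
static int example_query_geometry(struct iommu_domain *domain)
{
        struct iommu_domain_geometry geo;
        int ret;

        ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
        if (ret)
                return ret;

        pr_info("domain aperture: 0x%llx-0x%llx%s\n",
                (u64)geo.aperture_start, (u64)geo.aperture_end,
                geo.force_aperture ? " (enforced)" : "");

        return 0;
}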
2199
2200int iommu_domain_set_attr(struct iommu_domain *domain,
2201                          enum iommu_attr attr, void *data)
2202{
2203        int ret = 0;
2204
2205        switch (attr) {
2206        default:
2207                if (domain->ops->domain_set_attr == NULL)
2208                        return -EINVAL;
2209
2210                ret = domain->ops->domain_set_attr(domain, attr, data);
2211        }
2212
2213        return ret;
2214}
2215EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
2216
2217void iommu_get_resv_regions(struct device *dev, struct list_head *list)
2218{
2219        const struct iommu_ops *ops = dev->bus->iommu_ops;
2220
2221        if (ops && ops->get_resv_regions)
2222                ops->get_resv_regions(dev, list);
2223}
2224
2225void iommu_put_resv_regions(struct device *dev, struct list_head *list)
2226{
2227        const struct iommu_ops *ops = dev->bus->iommu_ops;
2228
2229        if (ops && ops->put_resv_regions)
2230                ops->put_resv_regions(dev, list);
2231}
2232
2233struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
2234                                                  size_t length, int prot,
2235                                                  enum iommu_resv_type type)
2236{
2237        struct iommu_resv_region *region;
2238
2239        region = kzalloc(sizeof(*region), GFP_KERNEL);
2240        if (!region)
2241                return NULL;
2242
2243        INIT_LIST_HEAD(&region->list);
2244        region->start = start;
2245        region->length = length;
2246        region->prot = prot;
2247        region->type = type;
2248        return region;
2249}
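
/*
 * Usage sketch (illustrative; "example_get_resv_regions" and the doorbell
 * address are hypothetical): an IOMMU driver's get_resv_regions() callback
 * reserving a 1MiB MSI doorbell window for every device.
 */
static void example_get_resv_regions(struct device *dev,
                                     struct list_head *head)
{
        struct iommu_resv_region *region;

        region = iommu_alloc_resv_region(0x8000000, 0x100000,
                                         IOMMU_WRITE | IOMMU_MMIO,
                                         IOMMU_RESV_MSI);
        if (!region)
                return;

        list_add_tail(&region->list, head);
}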
2250
2251static int
2252request_default_domain_for_dev(struct device *dev, unsigned long type)
2253{
2254        struct iommu_domain *domain;
2255        struct iommu_group *group;
2256        int ret;
2257
2258        /* Device must already be in a group before calling this function */
2259        group = iommu_group_get(dev);
2260        if (!group)
2261                return -EINVAL;
2262
2263        mutex_lock(&group->mutex);
2264
2265        ret = 0;
2266        if (group->default_domain && group->default_domain->type == type)
2267                goto out;
2268
2269        /* Don't change mappings of existing devices */
2270        ret = -EBUSY;
2271        if (iommu_group_device_count(group) != 1)
2272                goto out;
2273
2274        ret = -ENOMEM;
2275        domain = __iommu_domain_alloc(dev->bus, type);
2276        if (!domain)
2277                goto out;
2278
2279        /* Attach the device to the domain */
2280        ret = __iommu_attach_group(domain, group);
2281        if (ret) {
2282                iommu_domain_free(domain);
2283                goto out;
2284        }
2285
2286        /* Make the domain the default for this group */
2287        if (group->default_domain)
2288                iommu_domain_free(group->default_domain);
2289        group->default_domain = domain;
2290
2291        iommu_group_create_direct_mappings(group, dev);
2292
2293        dev_info(dev, "Using iommu %s mapping\n",
2294                 type == IOMMU_DOMAIN_DMA ? "dma" : "direct");
2295
2296        ret = 0;
2297out:
2298        mutex_unlock(&group->mutex);
2299        iommu_group_put(group);
2300
2301        return ret;
2302}
2303
2304/* Request that a device is direct mapped by the IOMMU */
2305int iommu_request_dm_for_dev(struct device *dev)
2306{
2307        return request_default_domain_for_dev(dev, IOMMU_DOMAIN_IDENTITY);
2308}
2309
2310/* Request that a device is not direct mapped, but uses a DMA domain */
2311int iommu_request_dma_domain_for_dev(struct device *dev)
2312{
2313        return request_default_domain_for_dev(dev, IOMMU_DOMAIN_DMA);
2314}
2315
2316void iommu_set_default_passthrough(bool cmd_line)
2317{
2318        if (cmd_line)
2319                iommu_set_cmd_line_dma_api();
2320
2321        iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
2322}
2323
2324void iommu_set_default_translated(bool cmd_line)
2325{
2326        if (cmd_line)
2327                iommu_set_cmd_line_dma_api();
2328
2329        iommu_def_domain_type = IOMMU_DOMAIN_DMA;
2330}
2331
2332bool iommu_default_passthrough(void)
2333{
2334        return iommu_def_domain_type == IOMMU_DOMAIN_IDENTITY;
2335}
2336EXPORT_SYMBOL_GPL(iommu_default_passthrough);
2337
2338const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
2339{
2340        const struct iommu_ops *ops = NULL;
2341        struct iommu_device *iommu;
2342
2343        spin_lock(&iommu_device_lock);
2344        list_for_each_entry(iommu, &iommu_device_list, list)
2345                if (iommu->fwnode == fwnode) {
2346                        ops = iommu->ops;
2347                        break;
2348                }
2349        spin_unlock(&iommu_device_lock);
2350        return ops;
2351}
2352
2353int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
2354                      const struct iommu_ops *ops)
2355{
2356        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2357
2358        if (fwspec)
2359                return ops == fwspec->ops ? 0 : -EINVAL;
2360
2361        fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
2362        if (!fwspec)
2363                return -ENOMEM;
2364
2365        of_node_get(to_of_node(iommu_fwnode));
2366        fwspec->iommu_fwnode = iommu_fwnode;
2367        fwspec->ops = ops;
2368        dev_iommu_fwspec_set(dev, fwspec);
2369        return 0;
2370}
2371EXPORT_SYMBOL_GPL(iommu_fwspec_init);
2372
2373void iommu_fwspec_free(struct device *dev)
2374{
2375        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2376
2377        if (fwspec) {
2378                fwnode_handle_put(fwspec->iommu_fwnode);
2379                kfree(fwspec);
2380                dev_iommu_fwspec_set(dev, NULL);
2381        }
2382}
2383EXPORT_SYMBOL_GPL(iommu_fwspec_free);
2384
2385int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
2386{
2387        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
2388        size_t size;
2389        int i;
2390
2391        if (!fwspec)
2392                return -EINVAL;
2393
2394        size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
2395        if (size > sizeof(*fwspec)) {
2396                fwspec = krealloc(fwspec, size, GFP_KERNEL);
2397                if (!fwspec)
2398                        return -ENOMEM;
2399
2400                dev_iommu_fwspec_set(dev, fwspec);
2401        }
2402
2403        for (i = 0; i < num_ids; i++)
2404                fwspec->ids[fwspec->num_ids + i] = ids[i];
2405
2406        fwspec->num_ids += num_ids;
2407        return 0;
2408}
2409EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
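
/*
 * Usage sketch (illustrative; "example_of_xlate" is hypothetical driver
 * code): an IOMMU driver's of_xlate() callback recording the master ID
 * from the "iommus" DT property in the device's fwspec. By the time
 * of_xlate() runs, the generic OF code has already called
 * iommu_fwspec_init() for the device.
 */
static int example_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        u32 id = args->args[0];

        return iommu_fwspec_add_ids(dev, &id, 1);
}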
2410
2411/*
2412 * Per device IOMMU features.
2413 */
2414bool iommu_dev_has_feature(struct device *dev, enum iommu_dev_features feat)
2415{
2416        const struct iommu_ops *ops = dev->bus->iommu_ops;
2417
2418        if (ops && ops->dev_has_feat)
2419                return ops->dev_has_feat(dev, feat);
2420
2421        return false;
2422}
2423EXPORT_SYMBOL_GPL(iommu_dev_has_feature);
2424
2425int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features feat)
2426{
2427        const struct iommu_ops *ops = dev->bus->iommu_ops;
2428
2429        if (ops && ops->dev_enable_feat)
2430                return ops->dev_enable_feat(dev, feat);
2431
2432        return -ENODEV;
2433}
2434EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
2435
2436/*
2437 * Device drivers should do the necessary cleanup before calling this.
2438 * For example, before disabling the aux-domain feature, the device driver
2439 * should detach all aux-domains. Otherwise, this will return -EBUSY.
2440 */
2441int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
2442{
2443        const struct iommu_ops *ops = dev->bus->iommu_ops;
2444
2445        if (ops && ops->dev_disable_feat)
2446                return ops->dev_disable_feat(dev, feat);
2447
2448        return -EBUSY;
2449}
2450EXPORT_SYMBOL_GPL(iommu_dev_disable_feature);
2451
2452bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
2453{
2454        const struct iommu_ops *ops = dev->bus->iommu_ops;
2455
2456        if (ops && ops->dev_feat_enabled)
2457                return ops->dev_feat_enabled(dev, feat);
2458
2459        return false;
2460}
2461EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
2462
2463/*
2464 * Aux-domain specific attach/detach.
2465 *
2466 * Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
2467 * true. Also, as long as domains are attached to a device through this
2468 * interface, any attempt to call iommu_attach_device() should fail
2469 * (iommu_detach_device() can't fail, so we fail when trying to re-attach).
2470 * This should make us safe against a device being attached to a guest as a
2471 * whole while there are still pasid users on it (aux and sva).
2472 */
2473int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
2474{
2475        int ret = -ENODEV;
2476
2477        if (domain->ops->aux_attach_dev)
2478                ret = domain->ops->aux_attach_dev(domain, dev);
2479
2480        if (!ret)
2481                trace_attach_device_to_domain(dev);
2482
2483        return ret;
2484}
2485EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
2486
2487void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
2488{
2489        if (domain->ops->aux_detach_dev) {
2490                domain->ops->aux_detach_dev(domain, dev);
2491                trace_detach_device_from_domain(dev);
2492        }
2493}
2494EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
2495
2496int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
2497{
2498        int ret = -ENODEV;
2499
2500        if (domain->ops->aux_get_pasid)
2501                ret = domain->ops->aux_get_pasid(domain, dev);
2502
2503        return ret;
2504}
2505EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
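
/*
 * Usage sketch (illustrative; "example_aux_attach" is hypothetical caller
 * code): attaching an aux-domain and retrieving the PASID with which the
 * device must tag its transactions for this domain. Assumes
 * IOMMU_DEV_FEAT_AUX was enabled via iommu_dev_enable_feature() first.
 */
static int example_aux_attach(struct iommu_domain *domain, struct device *dev)
{
        int ret, pasid;

        ret = iommu_aux_attach_device(domain, dev);
        if (ret)
                return ret;

        pasid = iommu_aux_get_pasid(domain, dev);
        if (pasid < 0) {
                iommu_aux_detach_device(domain, dev);
                return pasid;
        }

        /* ... tag DMA for this context with @pasid ... */

        return 0;
}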
2506
2507/**
2508 * iommu_sva_bind_device() - Bind a process address space to a device
2509 * @dev: the device
2510 * @mm: the mm to bind, caller must hold a reference to it
     * @drvdata: opaque driver data passed through to the IOMMU driver's
     *           sva_bind() callback
2511 *
2512 * Create a bond between device and address space, allowing the device to access
2513 * the mm using the returned PASID. If a bond already exists between @dev and
2514 * @mm, it is returned and an additional reference is taken. Caller must call
2515 * iommu_sva_unbind_device() to release each reference.
2516 *
2517 * iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA) must be called first, to
2518 * initialize the required SVA features.
2519 *
2520 * On error, returns an ERR_PTR value.
2521 */
2522struct iommu_sva *
2523iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
2524{
2525        struct iommu_group *group;
2526        struct iommu_sva *handle = ERR_PTR(-EINVAL);
2527        const struct iommu_ops *ops = dev->bus->iommu_ops;
2528
2529        if (!ops || !ops->sva_bind)
2530                return ERR_PTR(-ENODEV);
2531
2532        group = iommu_group_get(dev);
2533        if (!group)
2534                return ERR_PTR(-ENODEV);
2535
2536        /* Ensure device count and domain don't change while we're binding */
2537        mutex_lock(&group->mutex);
2538
2539        /*
2540         * To keep things simple, SVA currently doesn't support IOMMU groups
2541         * with more than one device. Existing SVA-capable systems are not
2542         * affected by the problems that required IOMMU groups (lack of ACS
2543         * isolation, device ID aliasing and other hardware issues).
2544         */
2545        if (iommu_group_device_count(group) != 1)
2546                goto out_unlock;
2547
2548        handle = ops->sva_bind(dev, mm, drvdata);
2549
2550out_unlock:
2551        mutex_unlock(&group->mutex);
2552        iommu_group_put(group);
2553
2554        return handle;
2555}
2556EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
2557
2558/**
2559 * iommu_sva_unbind_device() - Remove a bond created with iommu_sva_bind_device
2560 * @handle: the handle returned by iommu_sva_bind_device()
2561 *
2562 * Put a reference to a bond between device and address space. The device
2563 * should not be issuing any more transactions for this PASID. All outstanding
2564 * page requests for this PASID must have been flushed to the IOMMU.
2567 */
2568void iommu_sva_unbind_device(struct iommu_sva *handle)
2569{
2570        struct iommu_group *group;
2571        struct device *dev = handle->dev;
2572        const struct iommu_ops *ops = dev->bus->iommu_ops;
2573
2574        if (!ops || !ops->sva_unbind)
2575                return;
2576
2577        group = iommu_group_get(dev);
2578        if (!group)
2579                return;
2580
2581        mutex_lock(&group->mutex);
2582        ops->sva_unbind(handle);
2583        mutex_unlock(&group->mutex);
2584
2585        iommu_group_put(group);
2586}
2587EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
2588
2589int iommu_sva_set_ops(struct iommu_sva *handle,
2590                      const struct iommu_sva_ops *sva_ops)
2591{
2592        if (handle->ops && handle->ops != sva_ops)
2593                return -EEXIST;
2594
2595        handle->ops = sva_ops;
2596        return 0;
2597}
2598EXPORT_SYMBOL_GPL(iommu_sva_set_ops);
2599
2600int iommu_sva_get_pasid(struct iommu_sva *handle)
2601{
2602        const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
2603
2604        if (!ops || !ops->sva_get_pasid)
2605                return IOMMU_PASID_INVALID;
2606
2607        return ops->sva_get_pasid(handle);
2608}
2609EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);
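
/*
 * Usage sketch (illustrative; "example_sva_bind_current" is hypothetical
 * caller code and assumes process context, i.e. a valid current->mm):
 * the usual SVA flow once IOMMU_DEV_FEAT_SVA has been enabled via
 * iommu_dev_enable_feature().
 */
static int example_sva_bind_current(struct device *dev)
{
        struct iommu_sva *handle;
        int pasid;

        handle = iommu_sva_bind_device(dev, current->mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);
        if (pasid == IOMMU_PASID_INVALID) {
                iommu_sva_unbind_device(handle);
                return -ENODEV;
        }

        /* ... program @pasid into the device, then issue DMA ... */

        iommu_sva_unbind_device(handle);
        return 0;
}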
2610