linux/drivers/iommu/iommu.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);
static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;

struct iommu_callback_data {
        const struct iommu_ops *ops;
};

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
};

struct group_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

static const char * const iommu_group_resv_type_string[] = {
        [IOMMU_RESV_DIRECT]     = "direct",
        [IOMMU_RESV_RESERVED]   = "reserved",
        [IOMMU_RESV_MSI]        = "msi",
        [IOMMU_RESV_SW_MSI]     = "msi",
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static LIST_HEAD(iommu_device_list);
static DEFINE_SPINLOCK(iommu_device_lock);

int iommu_device_register(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_add_tail(&iommu->list, &iommu_device_list);
        spin_unlock(&iommu_device_lock);

        return 0;
}

void iommu_device_unregister(struct iommu_device *iommu)
{
        spin_lock(&iommu_device_lock);
        list_del(&iommu->list);
        spin_unlock(&iommu_device_lock);
}

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static int __init iommu_set_def_domain_type(char *str)
{
        bool pt;
        int ret;

        ret = kstrtobool(str, &pt);
        if (ret)
                return ret;

        iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
        return 0;
}
early_param("iommu.passthrough", iommu_set_def_domain_type);
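
/*
 * Example of the parsing above (illustrative, no new behavior): booting
 * with "iommu.passthrough=1" (or any other value kstrtobool() treats as
 * true, such as "y" or "on") makes IOMMU_DOMAIN_IDENTITY the default
 * domain type, while "iommu.passthrough=0" keeps the IOMMU_DOMAIN_DMA
 * default.
 */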

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

/**
 * iommu_insert_resv_region - Insert a new region in the
 * list of reserved regions.
 * @new: new region to insert
 * @regions: list of regions
 *
 * The new element is sorted by address with respect to the other
 * regions of the same type. In case it overlaps with another
 * region of the same type, regions are merged. In case it
 * overlaps with another region of different type, regions are
 * not merged.
 */
static int iommu_insert_resv_region(struct iommu_resv_region *new,
                                    struct list_head *regions)
{
        struct iommu_resv_region *region;
        phys_addr_t start = new->start;
        phys_addr_t end = new->start + new->length - 1;
        struct list_head *pos = regions->next;

        while (pos != regions) {
                struct iommu_resv_region *entry =
                        list_entry(pos, struct iommu_resv_region, list);
                phys_addr_t a = entry->start;
                phys_addr_t b = entry->start + entry->length - 1;
                int type = entry->type;

                if (end < a) {
                        goto insert;
                } else if (start > b) {
                        pos = pos->next;
                } else if ((start >= a) && (end <= b)) {
                        if (new->type == type)
                                goto done;
                        else
                                pos = pos->next;
                } else {
                        if (new->type == type) {
                                phys_addr_t new_start = min(a, start);
                                phys_addr_t new_end = max(b, end);

                                list_del(&entry->list);
                                entry->start = new_start;
                                entry->length = new_end - new_start + 1;
                                iommu_insert_resv_region(entry, regions);
                        } else {
                                pos = pos->next;
                        }
                }
        }
insert:
        region = iommu_alloc_resv_region(new->start, new->length,
                                         new->prot, new->type);
        if (!region)
                return -ENOMEM;

        list_add_tail(&region->list, pos);
done:
        return 0;
}
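
/*
 * Worked example of the merge logic above (illustrative only): with an
 * existing IOMMU_RESV_DIRECT region [0x1000, 0x2fff] in the list,
 * inserting a new IOMMU_RESV_DIRECT region [0x2000, 0x3fff] overlaps it
 * partially, so the entry is unlinked, widened to [0x1000, 0x3fff]
 * (start = min(), end = max()) and re-inserted.  Inserting the same
 * range with type IOMMU_RESV_RESERVED instead would leave both regions
 * in the list, since regions of different types are never merged.
 */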

static int
iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
                                 struct list_head *group_resv_regions)
{
        struct iommu_resv_region *entry;
        int ret = 0;

        list_for_each_entry(entry, dev_resv_regions, list) {
                ret = iommu_insert_resv_region(entry, group_resv_regions);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_get_group_resv_regions(struct iommu_group *group,
                                 struct list_head *head)
{
        struct group_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                struct list_head dev_resv_regions;

                INIT_LIST_HEAD(&dev_resv_regions);
                iommu_get_resv_regions(device->dev, &dev_resv_regions);
                ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
                iommu_put_resv_regions(device->dev, &dev_resv_regions);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);

static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
                                             char *buf)
{
        struct iommu_resv_region *region, *next;
        struct list_head group_resv_regions;
        char *str = buf;

        INIT_LIST_HEAD(&group_resv_regions);
        iommu_get_group_resv_regions(group, &group_resv_regions);

        list_for_each_entry_safe(region, next, &group_resv_regions, list) {
                str += sprintf(str, "0x%016llx 0x%016llx %s\n",
                               (long long int)region->start,
                               (long long int)(region->start +
                                                region->length - 1),
                               iommu_group_resv_type_string[region->type]);
                kfree(region);
        }

        return (str - buf);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static IOMMU_GROUP_ATTR(reserved_regions, 0444,
                        iommu_group_show_resv_regions, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        ret = iommu_group_create_file(group,
                                      &iommu_group_attr_reserved_regions);
        if (ret)
                return ERR_PTR(ret);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
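
/*
 * Illustrative driver-side sketch (not part of this file): a typical
 * sequence in an IOMMU driver's add_device() path, with my_dev as a
 * hypothetical device pointer:
 *
 *        struct iommu_group *group = iommu_group_alloc();
 *
 *        if (IS_ERR(group))
 *                return PTR_ERR(group);
 *        iommu_group_set_name(group, "my-group");  (optional, name is
 *                                                   hypothetical)
 *        ret = iommu_group_add_device(group, my_dev);
 *        iommu_group_put(group);                   (drop the allocation
 *                                                   reference)
 */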

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_resv_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_resv_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;

                if (domain->ops->apply_resv_region)
                        domain->ops->apply_resv_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                if (entry->type != IOMMU_RESV_DIRECT)
                        continue;

                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (phys_addr)
                                continue;

                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
                        if (ret)
                                goto out;
                }
        }

        iommu_flush_tlb_all(domain);

out:
        iommu_put_resv_regions(dev, &mappings);

        return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct group_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret)
                goto err_free_device;

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                ret = -ENOMEM;
                goto err_remove_link;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        kfree(device->name);
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }
                goto err_free_name;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        iommu_group_create_direct_mappings(group, dev);

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
                ret = __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);
        if (ret)
                goto err_put_group;

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

        return 0;

err_put_group:
        mutex_lock(&group->mutex);
        list_del(&device->list);
        mutex_unlock(&group->mutex);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
err_free_name:
        kfree(device->name);
err_remove_link:
        sysfs_remove_link(&dev->kobj, "iommu_group");
err_free_device:
        kfree(device);
        pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct group_device *tmp_device, *device = NULL;

        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct group_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct group_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
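
/*
 * Illustrative sketch (not part of this file): counting group members
 * from a driver, with count_one() as a hypothetical callback:
 *
 *        static int count_one(struct device *dev, void *data)
 *        {
 *                (*(int *)data)++;
 *                return 0;
 *        }
 *
 *        int n = 0;
 *        iommu_group_for_each_dev(group, &n, count_one);
 *
 * A non-zero return from the callback stops the iteration and is
 * propagated back to the caller.
 */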

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL is returned.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        return iommu_group_alloc();
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        return iommu_group_alloc();
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        if (!ops)
                return ERR_PTR(-EINVAL);

        group = ops->device_group(dev);
        if (WARN_ON_ONCE(group == NULL))
                return ERR_PTR(-EINVAL);

        if (IS_ERR(group))
                return group;

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
         */
        if (!group->default_domain) {
                struct iommu_domain *dom;

                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
                        dev_warn(dev,
                                 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
                                 iommu_def_domain_type);
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
                }

                group->default_domain = dom;
                if (!group->domain)
                        group->domain = dom;
        }

        ret = iommu_group_add_device(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;
        int ret;

        if (!ops->add_device)
                return 0;

        WARN_ON(dev->iommu_group);

        ret = ops->add_device(dev);

        /*
         * We ignore -ENODEV errors for now, as they just mean that the
         * device is not translated by an IOMMU. We still care about
         * other errors and fail to initialize when they happen.
         */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;

        if (ops->remove_device && dev->iommu_group)
                ops->remove_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        unsigned long group_action = 0;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device) {
                        int ret;

                        ret = ops->add_device(dev);
                        return (ret) ? NOTIFY_DONE : NOTIFY_OK;
                }
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
                }
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;
        struct notifier_block *nb;
        struct iommu_callback_data cb = {
                .ops = ops,
        };

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
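
/*
 * Illustrative sketch (not part of this file): an IOMMU driver typically
 * registers its ops for a bus type once during its own initialization,
 * e.g. with a hypothetical my_iommu_ops:
 *
 *        static const struct iommu_ops my_iommu_ops = {
 *                .domain_alloc = my_domain_alloc,
 *                .domain_free  = my_domain_free,
 *                ...
 *        };
 *
 *        err = bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 *
 * bus_set_iommu() then walks the devices already on the bus and hooks
 * future additions through the bus notifier set up in iommu_bus_init().
 */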

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
                return false;

        return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler,
                                        void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
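
/*
 * Illustrative sketch (not part of this file): a minimal fault handler a
 * domain owner might install, assuming the iommu_fault_handler_t
 * signature from include/linux/iommu.h:
 *
 *        static int my_fault_handler(struct iommu_domain *domain,
 *                                    struct device *dev, unsigned long iova,
 *                                    int flags, void *token)
 *        {
 *                dev_err(dev, "unexpected fault at 0x%lx\n", iova);
 *                return -ENOSYS;  (a non-zero return tells
 *                                  report_iommu_fault() the fault was
 *                                  not handled)
 *        }
 *
 *        iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */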

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if ((domain->ops->is_attach_deferred != NULL) &&
            domain->ops->is_attach_deferred(domain, dev))
                return 0;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (!ret)
                trace_attach_device_to_domain(dev);
        return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (!group)
                return -ENODEV;

        /*
         * Lock the group to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
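
/*
 * Illustrative sketch (not part of this file): the typical unmanaged-
 * domain life cycle for a caller that owns a device's translation:
 *
 *        struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *
 *        if (!domain)
 *                return -ENOMEM;
 *        ret = iommu_attach_device(domain, dev);
 *        ... iommu_map()/iommu_unmap() on the domain ...
 *        iommu_detach_device(domain, dev);
 *        iommu_domain_free(domain);
 *
 * Note the single-device restriction enforced above: for groups with
 * more than one device, iommu_attach_group()/iommu_detach_group() must
 * be used instead.
 */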

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        if ((domain->ops->is_attach_deferred != NULL) &&
            domain->ops->is_attach_deferred(domain, dev))
                return;

        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return;

        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                WARN_ON(1);
                goto out_unlock;
        }

        __iommu_detach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get(dev);
        if (!group)
                return NULL;

        domain = group->domain;

        iommu_group_put(group);

        return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requester ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
{
        int ret;

        if (group->default_domain && group->domain != group->default_domain)
                return -EBUSY;

        ret = __iommu_group_for_each_dev(group, domain,
                                         iommu_group_do_attach_device);
        if (ret == 0)
                group->domain = domain;

        return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_attach_group(domain, group);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        __iommu_detach_device(domain, dev);

        return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
{
        int ret;

        if (!group->default_domain) {
                __iommu_group_for_each_dev(group, domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
                return;
        }

        if (group->domain == group->default_domain)
                return;

        /* Detach by re-attaching to the default domain */
        ret = __iommu_group_for_each_dev(group, group->default_domain,
                                         iommu_group_do_attach_device);
        if (ret != 0)
                WARN_ON(1);
        else
                group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        mutex_lock(&group->mutex);
        __iommu_detach_group(domain, group);
        mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements ? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);

                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= domain->pgsize_bitmap;

        /* make sure we're still sane */
        BUG_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}
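
/*
 * Worked example for iommu_pgsize() (illustrative only, SZ_* values from
 * include/linux/sizes.h): with pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
 * addr_merge = 0x201000 and size = 0x400000, __fls(size) gives 22 and
 * __ffs(addr_merge) gives 12, so the acceptable-size mask is
 * (1UL << 13) - 1.  Masking with the bitmap leaves only SZ_4K, so a 4K
 * page is picked: the 4K-aligned address rules out the larger sizes
 * even though 4M remain to be mapped.
 */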

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
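
/*
 * Illustrative sketch (not part of this file): identity-mapping one
 * hypothetical 2M physical region into a domain for read/write DMA:
 *
 *        ret = iommu_map(domain, 0x40000000, 0x40000000, SZ_2M,
 *                        IOMMU_READ | IOMMU_WRITE);
 *        if (ret)
 *                return ret;
 *        ...
 *        unmapped = iommu_unmap(domain, 0x40000000, SZ_2M);
 *
 * iommu_map() splits the request into hardware-supported page sizes via
 * iommu_pgsize() and unrolls partial work on failure, so the caller only
 * needs properly aligned iova/paddr/size.
 */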

static size_t __iommu_unmap(struct iommu_domain *domain,
                            unsigned long iova, size_t size,
                            bool sync)
{
        const struct iommu_ops *ops = domain->ops;
        size_t unmapped_page, unmapped = 0;
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;

        if (unlikely(ops->unmap == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return 0;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return 0;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return 0;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                if (sync && ops->iotlb_range_add)
                        ops->iotlb_range_add(domain, iova, pgsize);

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        if (sync && ops->iotlb_sync)
                ops->iotlb_sync(domain);

        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
}

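/**
 * iommu_unmap - unmap a region of IOVA space and flush the IOTLB
 * @domain: the domain the region is mapped in
 * @iova: base IOVA of the region; must be aligned to the domain's
 *        minimum page size
 * @size: size of the region in bytes; must be aligned likewise
 *
 * The IOTLB is flushed before returning, provided the driver implements
 * the iotlb_range_add/iotlb_sync callbacks. Returns the number of bytes
 * actually unmapped, which may be less than @size if an area that is
 * not mapped is encountered first.
 */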
size_t iommu_unmap(struct iommu_domain *domain,
                   unsigned long iova, size_t size)
{
        return __iommu_unmap(domain, iova, size, true);
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t iommu_unmap_fast(struct iommu_domain *domain,
                        unsigned long iova, size_t size)
{
        return __iommu_unmap(domain, iova, size, false);
}
EXPORT_SYMBOL_GPL(iommu_unmap_fast);
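
/*
 * Example (illustrative sketch, not part of this file): a heavy unmapper
 * such as VFIO can defer the IOTLB flush by using iommu_unmap_fast()
 * together with the TLB-flushing helpers from <linux/iommu.h>, paying for
 * a single sync at the end instead of one per unmap. The iovas[] and
 * sizes[] arrays are assumptions of the sketch.
 *
 *        for (i = 0; i < n; i++) {
 *                iommu_unmap_fast(domain, iovas[i], sizes[i]);
 *                iommu_tlb_range_add(domain, iovas[i], sizes[i]);
 *        }
 *        iommu_tlb_sync(domain);
 */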

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                            struct scatterlist *sg, unsigned int nents, int prot)
{
        struct scatterlist *s;
        size_t mapped = 0;
        unsigned int i, min_pagesz;
        int ret;

        if (unlikely(domain->pgsize_bitmap == 0UL))
                return 0;

        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

                /*
                 * We are mapping on IOMMU page boundaries, so the offset
                 * within an IOMMU page must be 0. However, the IOMMU may
                 * support pages smaller than PAGE_SIZE, so s->offset may
                 * still be non-zero, as long as it falls on an IOMMU page
                 * boundary within the CPU page.
                 */
                if (!IS_ALIGNED(s->offset, min_pagesz))
                        goto out_err;

                ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
                if (ret)
                        goto out_err;

                mapped += s->length;
        }

        return mapped;

out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);

        return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
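
/*
 * Example (illustrative sketch, not part of this file): mapping a two-page
 * scatterlist through the iommu_map_sg() wrapper from <linux/iommu.h>,
 * which dispatches to the driver's ->map_sg() callback (this function, for
 * drivers that use the default). On failure nothing is left mapped and 0
 * is returned. page0, page1 and iova are assumptions of the sketch.
 *
 *        struct scatterlist sg[2];
 *        size_t mapped;
 *
 *        sg_init_table(sg, 2);
 *        sg_set_page(&sg[0], page0, PAGE_SIZE, 0);
 *        sg_set_page(&sg[1], page1, PAGE_SIZE, 0);
 *        mapped = iommu_map_sg(domain, iova, sg, 2, IOMMU_READ);
 *        if (mapped != 2 * PAGE_SIZE)
 *                return -ENOMEM;
 */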

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
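
/*
 * Example (illustrative sketch, not part of this file): window-based
 * domains (e.g. the Freescale PAMU) are typically programmed by first
 * setting the number of windows through DOMAIN_ATTR_WINDOWS and then
 * enabling individual windows. The count, paddr and size values below
 * are assumptions of the sketch.
 *
 *        u32 count = 2;
 *
 *        ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_WINDOWS, &count);
 *        if (!ret)
 *                ret = iommu_domain_window_enable(domain, 0, paddr, SZ_1M,
 *                                                 IOMMU_READ | IOMMU_WRITE);
 */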

/**
 * report_iommu_fault() - report an IOMMU fault to the IOMMU framework
 * @domain: the iommu domain where the fault has happened
 * @dev: the device where the fault has happened
 * @iova: the faulting address
 * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
 *
 * This function should be called by the low-level IOMMU implementations
 * whenever IOMMU faults happen, to allow high-level users, that are
 * interested in such events, to know about them.
 *
 * This event may be useful for several possible use cases:
 * - mere logging of the event
 * - dynamic TLB/PTE loading
 * - restarting the faulting device, if that is required
 *
 * Returns 0 on success and an appropriate error code otherwise (if dynamic
 * PTE/TLB loading will one day be supported, implementations will be able
 * to tell whether it succeeded or not according to this return value).
 *
 * Specifically, -ENOSYS is returned if a fault handler isn't installed
 * (though fault handlers can also return -ENOSYS, in case they want to
 * elicit the default behavior of the IOMMU drivers).
 */
int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
                       unsigned long iova, int flags)
{
        int ret = -ENOSYS;

        /*
         * if upper layers showed interest and installed a fault handler,
         * invoke it.
         */
        if (domain->handler)
                ret = domain->handler(domain, dev, iova, flags,
                                      domain->handler_token);

        trace_io_page_fault(dev, iova, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(report_iommu_fault);
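
/*
 * Example (illustrative sketch, not part of this file): a driver installs
 * a fault handler with iommu_set_fault_handler() from <linux/iommu.h> so
 * that report_iommu_fault() invokes it. Returning -ENOSYS from the handler
 * keeps the IOMMU driver's default behavior. my_fault_handler is a
 * hypothetical name.
 *
 *        static int my_fault_handler(struct iommu_domain *domain,
 *                                    struct device *dev, unsigned long iova,
 *                                    int flags, void *token)
 *        {
 *                dev_err(dev, "IOMMU fault at 0x%lx (flags 0x%x)\n",
 *                        iova, flags);
 *                return -ENOSYS;
 *        }
 *
 *        iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */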

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        BUG_ON(!iommu_group_kset);

        return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry  = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging  = data;
                *paging = (domain->pgsize_bitmap != 0UL);
                break;
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_get_windows != NULL)
                        *count = domain->ops->domain_get_windows(domain);
                else
                        ret = -ENODEV;

                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
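
/*
 * Example (illustrative sketch, not part of this file): querying the
 * aperture and paging capability of a domain before carving IOVA space
 * out of it.
 *
 *        struct iommu_domain_geometry geo;
 *        bool paging;
 *
 *        if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *                pr_info("aperture %pad..%pad\n", &geo.aperture_start,
 *                        &geo.aperture_end);
 *        if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_PAGING, &paging))
 *                pr_info("paging %s\n", paging ? "supported" : "absent");
 */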

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_set_windows != NULL)
                        ret = domain->ops->domain_set_windows(domain, *count);
                else
                        ret = -ENODEV;

                break;
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->get_resv_regions)
                ops->get_resv_regions(dev, list);
}

void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->put_resv_regions)
                ops->put_resv_regions(dev, list);
}

struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
                                                  size_t length, int prot,
                                                  enum iommu_resv_type type)
{
        struct iommu_resv_region *region;

        region = kzalloc(sizeof(*region), GFP_KERNEL);
        if (!region)
                return NULL;

        INIT_LIST_HEAD(&region->list);
        region->start = start;
        region->length = length;
        region->prot = prot;
        region->type = type;
        return region;
}
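
/*
 * Example (illustrative sketch, not part of this file): walking the
 * reserved regions of a device, e.g. to punch holes into an IOVA
 * allocator, and releasing the list again. reserve_iova_range() is a
 * hypothetical stand-in for whatever the caller does with each region.
 *
 *        struct iommu_resv_region *region;
 *        LIST_HEAD(resv_regions);
 *
 *        iommu_get_resv_regions(dev, &resv_regions);
 *        list_for_each_entry(region, &resv_regions, list)
 *                reserve_iova_range(region->start, region->length);
 *        iommu_put_resv_regions(dev, &resv_regions);
 */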

/* Request that a device be direct-mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
        struct iommu_domain *dm_domain;
        struct iommu_group *group;
        int ret;

        /* Device must already be in a group before calling this function */
        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        mutex_lock(&group->mutex);

        /* Check if the default domain is already direct mapped */
        ret = 0;
        if (group->default_domain &&
            group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
                goto out;

        /* Don't change mappings of existing devices */
        ret = -EBUSY;
        if (iommu_group_device_count(group) != 1)
                goto out;

        /* Allocate a direct mapped domain */
        ret = -ENOMEM;
        dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
        if (!dm_domain)
                goto out;

        /* Attach the device to the domain */
        ret = __iommu_attach_group(dm_domain, group);
        if (ret) {
                iommu_domain_free(dm_domain);
                goto out;
        }

        /* Make the direct mapped domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = dm_domain;

        pr_info("Using direct mapping for device %s\n", dev_name(dev));

        ret = 0;
out:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}

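/*
 * Example (illustrative sketch, not part of this file): platform code may
 * ask for an identity mapping when a device cannot tolerate address
 * translation, e.g. because firmware keeps using it after boot. The
 * predicate below is hypothetical.
 *
 *        if (device_needs_identity_map(dev))
 *                iommu_request_dm_for_dev(dev);
 */

/*
 * Look up the iommu_ops of a registered IOMMU instance by the firmware
 * node (e.g. a DT node) it was registered with. Returns NULL when no
 * matching instance has been registered.
 */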
const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
        const struct iommu_ops *ops = NULL;
        struct iommu_device *iommu;

        spin_lock(&iommu_device_lock);
        list_for_each_entry(iommu, &iommu_device_list, list)
                if (iommu->fwnode == fwnode) {
                        ops = iommu->ops;
                        break;
                }
        spin_unlock(&iommu_device_lock);
        return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec)
                return ops == fwspec->ops ? 0 : -EINVAL;

        fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
        if (!fwspec)
                return -ENOMEM;

        of_node_get(to_of_node(iommu_fwnode));
        fwspec->iommu_fwnode = iommu_fwnode;
        fwspec->ops = ops;
        dev->iommu_fwspec = fwspec;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec) {
                fwnode_handle_put(fwspec->iommu_fwnode);
                kfree(fwspec);
                dev->iommu_fwspec = NULL;
        }
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        size_t size;
        int i;

        if (!fwspec)
                return -EINVAL;

        size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
        if (size > sizeof(*fwspec)) {
                fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;

                dev->iommu_fwspec = fwspec;
        }

        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];

        fwspec->num_ids += num_ids;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
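
/*
 * Example (illustrative sketch, not part of this file): firmware code
 * typically builds a device's fwspec while parsing the bus topology,
 * associating the device with its IOMMU instance and recording the IDs
 * (e.g. stream IDs) it masters with. iommu_fwnode, ops and sid are
 * assumptions of the sketch.
 *
 *        u32 sid = ...;
 *        int ret;
 *
 *        ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
 *        if (!ret)
 *                ret = iommu_fwspec_add_ids(dev, &sid, 1);
 *        if (ret)
 *                iommu_fwspec_free(dev);
 */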