linux/drivers/iommu/iommu.c
   1/*
   2 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
   3 * Author: Joerg Roedel <jroedel@suse.de>
   4 *
   5 * This program is free software; you can redistribute it and/or modify it
   6 * under the terms of the GNU General Public License version 2 as published
   7 * by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it will be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write to the Free Software
  16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  17 */
  18
  19#define pr_fmt(fmt)    "iommu: " fmt
  20
  21#include <linux/device.h>
  22#include <linux/kernel.h>
  23#include <linux/bug.h>
  24#include <linux/types.h>
  25#include <linux/module.h>
  26#include <linux/slab.h>
  27#include <linux/errno.h>
  28#include <linux/iommu.h>
  29#include <linux/idr.h>
  30#include <linux/notifier.h>
  31#include <linux/err.h>
  32#include <linux/pci.h>
  33#include <linux/bitops.h>
  34#include <linux/property.h>
  35#include <trace/events/iommu.h>
  36
  37static struct kset *iommu_group_kset;
  38static DEFINE_IDA(iommu_group_ida);
  39
  40struct iommu_callback_data {
  41        const struct iommu_ops *ops;
  42};
  43
  44struct iommu_group {
  45        struct kobject kobj;
  46        struct kobject *devices_kobj;
  47        struct list_head devices;
  48        struct mutex mutex;
  49        struct blocking_notifier_head notifier;
  50        void *iommu_data;
  51        void (*iommu_data_release)(void *iommu_data);
  52        char *name;
  53        int id;
  54        struct iommu_domain *default_domain;
  55        struct iommu_domain *domain;
  56};
  57
  58struct iommu_device {
  59        struct list_head list;
  60        struct device *dev;
  61        char *name;
  62};
  63
  64struct iommu_group_attribute {
  65        struct attribute attr;
  66        ssize_t (*show)(struct iommu_group *group, char *buf);
  67        ssize_t (*store)(struct iommu_group *group,
  68                         const char *buf, size_t count);
  69};
  70
  71#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
  72struct iommu_group_attribute iommu_group_attr_##_name =         \
  73        __ATTR(_name, _mode, _show, _store)
  74
  75#define to_iommu_group_attr(_attr)      \
  76        container_of(_attr, struct iommu_group_attribute, attr)
  77#define to_iommu_group(_kobj)           \
  78        container_of(_kobj, struct iommu_group, kobj)
  79
  80static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
  81                                                 unsigned type);
  82static int __iommu_attach_device(struct iommu_domain *domain,
  83                                 struct device *dev);
  84static int __iommu_attach_group(struct iommu_domain *domain,
  85                                struct iommu_group *group);
  86static void __iommu_detach_group(struct iommu_domain *domain,
  87                                 struct iommu_group *group);
  88
  89static ssize_t iommu_group_attr_show(struct kobject *kobj,
  90                                     struct attribute *__attr, char *buf)
  91{
  92        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
  93        struct iommu_group *group = to_iommu_group(kobj);
  94        ssize_t ret = -EIO;
  95
  96        if (attr->show)
  97                ret = attr->show(group, buf);
  98        return ret;
  99}
 100
 101static ssize_t iommu_group_attr_store(struct kobject *kobj,
 102                                      struct attribute *__attr,
 103                                      const char *buf, size_t count)
 104{
 105        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
 106        struct iommu_group *group = to_iommu_group(kobj);
 107        ssize_t ret = -EIO;
 108
 109        if (attr->store)
 110                ret = attr->store(group, buf, count);
 111        return ret;
 112}
 113
 114static const struct sysfs_ops iommu_group_sysfs_ops = {
 115        .show = iommu_group_attr_show,
 116        .store = iommu_group_attr_store,
 117};
 118
 119static int iommu_group_create_file(struct iommu_group *group,
 120                                   struct iommu_group_attribute *attr)
 121{
 122        return sysfs_create_file(&group->kobj, &attr->attr);
 123}
 124
 125static void iommu_group_remove_file(struct iommu_group *group,
 126                                    struct iommu_group_attribute *attr)
 127{
 128        sysfs_remove_file(&group->kobj, &attr->attr);
 129}
 130
 131static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
 132{
 133        return sprintf(buf, "%s\n", group->name);
 134}
 135
 136static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
 137
 138static void iommu_group_release(struct kobject *kobj)
 139{
 140        struct iommu_group *group = to_iommu_group(kobj);
 141
 142        pr_debug("Releasing group %d\n", group->id);
 143
 144        if (group->iommu_data_release)
 145                group->iommu_data_release(group->iommu_data);
 146
 147        ida_simple_remove(&iommu_group_ida, group->id);
 148
 149        if (group->default_domain)
 150                iommu_domain_free(group->default_domain);
 151
 152        kfree(group->name);
 153        kfree(group);
 154}
 155
 156static struct kobj_type iommu_group_ktype = {
 157        .sysfs_ops = &iommu_group_sysfs_ops,
 158        .release = iommu_group_release,
 159};
 160
 161/**
 162 * iommu_group_alloc - Allocate a new group
 163 * @name: Optional name to associate with group, visible in sysfs
 164 *
 165 * This function is called by an iommu driver to allocate a new iommu
 166 * group.  The iommu group represents the minimum granularity of the iommu.
 167 * Upon successful return, the caller holds a reference to the supplied
 168 * group in order to hold the group until devices are added.  Use
 169 * iommu_group_put() to release this extra reference count, allowing the
 170 * group to be automatically reclaimed once it has no devices or external
 171 * references.
 172 */
 173struct iommu_group *iommu_group_alloc(void)
 174{
 175        struct iommu_group *group;
 176        int ret;
 177
 178        group = kzalloc(sizeof(*group), GFP_KERNEL);
 179        if (!group)
 180                return ERR_PTR(-ENOMEM);
 181
 182        group->kobj.kset = iommu_group_kset;
 183        mutex_init(&group->mutex);
 184        INIT_LIST_HEAD(&group->devices);
 185        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
 186
 187        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
 188        if (ret < 0) {
 189                kfree(group);
 190                return ERR_PTR(ret);
 191        }
 192        group->id = ret;
 193
 194        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
 195                                   NULL, "%d", group->id);
 196        if (ret) {
 197                ida_simple_remove(&iommu_group_ida, group->id);
 198                kfree(group);
 199                return ERR_PTR(ret);
 200        }
 201
 202        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
 203        if (!group->devices_kobj) {
 204                kobject_put(&group->kobj); /* triggers .release & free */
 205                return ERR_PTR(-ENOMEM);
 206        }
 207
 208        /*
 209         * The devices_kobj holds a reference on the group kobject, so
 210         * as long as that exists so will the group.  We can therefore
 211         * use the devices_kobj for reference counting.
 212         */
 213        kobject_put(&group->kobj);
 214
 215        pr_debug("Allocated group %d\n", group->id);
 216
 217        return group;
 218}
 219EXPORT_SYMBOL_GPL(iommu_group_alloc);
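
/*
 * Example (illustrative sketch only, not part of the upstream file): how a
 * hypothetical IOMMU driver might allocate a named group and then drop the
 * extra reference returned by iommu_group_alloc().  The group name used
 * here is made up for illustration.
 */
static int __maybe_unused example_alloc_named_group(void)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_alloc();
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = iommu_group_set_name(group, "example-group");

	/* Drop the allocation reference; member devices keep the group alive. */
	iommu_group_put(group);

	return ret;
}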
 220
 221struct iommu_group *iommu_group_get_by_id(int id)
 222{
 223        struct kobject *group_kobj;
 224        struct iommu_group *group;
 225        const char *name;
 226
 227        if (!iommu_group_kset)
 228                return NULL;
 229
 230        name = kasprintf(GFP_KERNEL, "%d", id);
 231        if (!name)
 232                return NULL;
 233
 234        group_kobj = kset_find_obj(iommu_group_kset, name);
 235        kfree(name);
 236
 237        if (!group_kobj)
 238                return NULL;
 239
 240        group = container_of(group_kobj, struct iommu_group, kobj);
 241        BUG_ON(group->id != id);
 242
 243        kobject_get(group->devices_kobj);
 244        kobject_put(&group->kobj);
 245
 246        return group;
 247}
 248EXPORT_SYMBOL_GPL(iommu_group_get_by_id);
 249
 250/**
 251 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 252 * @group: the group
 253 *
 254 * iommu drivers can store data in the group for use when doing iommu
 255 * operations.  This function provides a way to retrieve it.  Caller
 256 * should hold a group reference.
 257 */
 258void *iommu_group_get_iommudata(struct iommu_group *group)
 259{
 260        return group->iommu_data;
 261}
 262EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);
 263
 264/**
 265 * iommu_group_set_iommudata - set iommu_data for a group
 266 * @group: the group
 267 * @iommu_data: new data
 268 * @release: release function for iommu_data
 269 *
 270 * iommu drivers can store data in the group for use when doing iommu
 271 * operations.  This function provides a way to set the data after
 272 * the group has been allocated.  Caller should hold a group reference.
 273 */
 274void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
 275                               void (*release)(void *iommu_data))
 276{
 277        group->iommu_data = iommu_data;
 278        group->iommu_data_release = release;
 279}
 280EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
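
/*
 * Example (illustrative sketch only): attaching driver-private data to a
 * group together with a release callback, as an IOMMU driver might do right
 * after allocating the group.  "struct example_group_data" is a made-up type.
 */
struct example_group_data {
	int example_field;
};

static void example_group_data_release(void *iommu_data)
{
	kfree(iommu_data);
}

static int __maybe_unused example_set_group_data(struct iommu_group *group)
{
	struct example_group_data *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* Freed automatically by the release callback when the group dies. */
	iommu_group_set_iommudata(group, data, example_group_data_release);

	return 0;
}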
 281
 282/**
 283 * iommu_group_set_name - set name for a group
 284 * @group: the group
 285 * @name: name
 286 *
 287 * Allow iommu driver to set a name for a group.  When set it will
 288 * appear in a name attribute file under the group in sysfs.
 289 */
 290int iommu_group_set_name(struct iommu_group *group, const char *name)
 291{
 292        int ret;
 293
 294        if (group->name) {
 295                iommu_group_remove_file(group, &iommu_group_attr_name);
 296                kfree(group->name);
 297                group->name = NULL;
 298                if (!name)
 299                        return 0;
 300        }
 301
 302        group->name = kstrdup(name, GFP_KERNEL);
 303        if (!group->name)
 304                return -ENOMEM;
 305
 306        ret = iommu_group_create_file(group, &iommu_group_attr_name);
 307        if (ret) {
 308                kfree(group->name);
 309                group->name = NULL;
 310                return ret;
 311        }
 312
 313        return 0;
 314}
 315EXPORT_SYMBOL_GPL(iommu_group_set_name);
 316
 317static int iommu_group_create_direct_mappings(struct iommu_group *group,
 318                                              struct device *dev)
 319{
 320        struct iommu_domain *domain = group->default_domain;
 321        struct iommu_dm_region *entry;
 322        struct list_head mappings;
 323        unsigned long pg_size;
 324        int ret = 0;
 325
 326        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 327                return 0;
 328
 329        BUG_ON(!domain->pgsize_bitmap);
 330
 331        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
 332        INIT_LIST_HEAD(&mappings);
 333
 334        iommu_get_dm_regions(dev, &mappings);
 335
 336        /* We need to consider overlapping regions for different devices */
 337        list_for_each_entry(entry, &mappings, list) {
 338                dma_addr_t start, end, addr;
 339
 340                if (domain->ops->apply_dm_region)
 341                        domain->ops->apply_dm_region(dev, domain, entry);
 342
 343                start = ALIGN(entry->start, pg_size);
 344                end   = ALIGN(entry->start + entry->length, pg_size);
 345
 346                for (addr = start; addr < end; addr += pg_size) {
 347                        phys_addr_t phys_addr;
 348
 349                        phys_addr = iommu_iova_to_phys(domain, addr);
 350                        if (phys_addr)
 351                                continue;
 352
 353                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
 354                        if (ret)
 355                                goto out;
 356                }
 357
 358        }
 359
 360out:
 361        iommu_put_dm_regions(dev, &mappings);
 362
 363        return ret;
 364}
 365
 366/**
 367 * iommu_group_add_device - add a device to an iommu group
 368 * @group: the group into which to add the device (reference should be held)
 369 * @dev: the device
 370 *
 371 * This function is called by an iommu driver to add a device into a
 372 * group.  Adding a device increments the group reference count.
 373 */
 374int iommu_group_add_device(struct iommu_group *group, struct device *dev)
 375{
 376        int ret, i = 0;
 377        struct iommu_device *device;
 378
 379        device = kzalloc(sizeof(*device), GFP_KERNEL);
 380        if (!device)
 381                return -ENOMEM;
 382
 383        device->dev = dev;
 384
 385        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
 386        if (ret) {
 387                kfree(device);
 388                return ret;
 389        }
 390
 391        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
 392rename:
 393        if (!device->name) {
 394                sysfs_remove_link(&dev->kobj, "iommu_group");
 395                kfree(device);
 396                return -ENOMEM;
 397        }
 398
 399        ret = sysfs_create_link_nowarn(group->devices_kobj,
 400                                       &dev->kobj, device->name);
 401        if (ret) {
 402                kfree(device->name);
 403                if (ret == -EEXIST && i >= 0) {
 404                        /*
 405                         * Account for the slim chance of collision
 406                         * and append an instance to the name.
 407                         */
 408                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
 409                                                 kobject_name(&dev->kobj), i++);
 410                        goto rename;
 411                }
 412
 413                sysfs_remove_link(&dev->kobj, "iommu_group");
 414                kfree(device);
 415                return ret;
 416        }
 417
 418        kobject_get(group->devices_kobj);
 419
 420        dev->iommu_group = group;
 421
 422        iommu_group_create_direct_mappings(group, dev);
 423
 424        mutex_lock(&group->mutex);
 425        list_add_tail(&device->list, &group->devices);
 426        if (group->domain)
 427                __iommu_attach_device(group->domain, dev);
 428        mutex_unlock(&group->mutex);
 429
 430        /* Notify any listeners about change to group. */
 431        blocking_notifier_call_chain(&group->notifier,
 432                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
 433
 434        trace_add_device_to_group(group->id, dev);
 435
 436        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
 437
 438        return 0;
 439}
 440EXPORT_SYMBOL_GPL(iommu_group_add_device);
 441
 442/**
  443 * iommu_group_remove_device - remove a device from its current group
 444 * @dev: device to be removed
 445 *
 446 * This function is called by an iommu driver to remove the device from
  447 * its current group.  This decrements the iommu group reference count.
 448 */
 449void iommu_group_remove_device(struct device *dev)
 450{
 451        struct iommu_group *group = dev->iommu_group;
 452        struct iommu_device *tmp_device, *device = NULL;
 453
 454        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
 455
 456        /* Pre-notify listeners that a device is being removed. */
 457        blocking_notifier_call_chain(&group->notifier,
 458                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);
 459
 460        mutex_lock(&group->mutex);
 461        list_for_each_entry(tmp_device, &group->devices, list) {
 462                if (tmp_device->dev == dev) {
 463                        device = tmp_device;
 464                        list_del(&device->list);
 465                        break;
 466                }
 467        }
 468        mutex_unlock(&group->mutex);
 469
 470        if (!device)
 471                return;
 472
 473        sysfs_remove_link(group->devices_kobj, device->name);
 474        sysfs_remove_link(&dev->kobj, "iommu_group");
 475
 476        trace_remove_device_from_group(group->id, dev);
 477
 478        kfree(device->name);
 479        kfree(device);
 480        dev->iommu_group = NULL;
 481        kobject_put(group->devices_kobj);
 482}
 483EXPORT_SYMBOL_GPL(iommu_group_remove_device);
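
/*
 * Example (illustrative sketch only): the usual pairing of the two helpers
 * above as an IOMMU driver's add_device/remove_device callbacks might use
 * them.  "example_populate_group" is a placeholder name.
 */
static int __maybe_unused example_populate_group(struct iommu_group *group,
						 struct device *dev)
{
	int ret;

	ret = iommu_group_add_device(group, dev);	/* takes a group reference */
	if (ret)
		return ret;

	/* ... later, e.g. from the driver's remove_device callback ... */
	iommu_group_remove_device(dev);			/* drops that reference */

	return 0;
}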
 484
 485static int iommu_group_device_count(struct iommu_group *group)
 486{
 487        struct iommu_device *entry;
 488        int ret = 0;
 489
 490        list_for_each_entry(entry, &group->devices, list)
 491                ret++;
 492
 493        return ret;
 494}
 495
 496/**
 497 * iommu_group_for_each_dev - iterate over each device in the group
 498 * @group: the group
 499 * @data: caller opaque data to be passed to callback function
 500 * @fn: caller supplied callback function
 501 *
 502 * This function is called by group users to iterate over group devices.
 503 * Callers should hold a reference count to the group during callback.
 504 * The group->mutex is held across callbacks, which will block calls to
 505 * iommu_group_add/remove_device.
 506 */
 507static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
 508                                      int (*fn)(struct device *, void *))
 509{
 510        struct iommu_device *device;
 511        int ret = 0;
 512
 513        list_for_each_entry(device, &group->devices, list) {
 514                ret = fn(device->dev, data);
 515                if (ret)
 516                        break;
 517        }
 518        return ret;
 519}
 520
 521
 522int iommu_group_for_each_dev(struct iommu_group *group, void *data,
 523                             int (*fn)(struct device *, void *))
 524{
 525        int ret;
 526
 527        mutex_lock(&group->mutex);
 528        ret = __iommu_group_for_each_dev(group, data, fn);
 529        mutex_unlock(&group->mutex);
 530
 531        return ret;
 532}
 533EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
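
/*
 * Example (illustrative sketch only): counting the devices in a group with
 * a caller-supplied callback and opaque cookie, mirroring what
 * iommu_group_device_count() does internally.
 */
static int __maybe_unused example_count_one(struct device *dev, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* returning non-zero stops the iteration */
}

static int __maybe_unused example_count_group_devices(struct iommu_group *group)
{
	int count = 0;

	iommu_group_for_each_dev(group, &count, example_count_one);

	return count;
}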
 534
 535/**
 536 * iommu_group_get - Return the group for a device and increment reference
 537 * @dev: get the group that this device belongs to
 538 *
 539 * This function is called by iommu drivers and users to get the group
 540 * for the specified device.  If found, the group is returned and the group
  541 * reference is incremented; otherwise NULL is returned.
 542 */
 543struct iommu_group *iommu_group_get(struct device *dev)
 544{
 545        struct iommu_group *group = dev->iommu_group;
 546
 547        if (group)
 548                kobject_get(group->devices_kobj);
 549
 550        return group;
 551}
 552EXPORT_SYMBOL_GPL(iommu_group_get);
 553
 554/**
 555 * iommu_group_put - Decrement group reference
 556 * @group: the group to use
 557 *
 558 * This function is called by iommu drivers and users to release the
 559 * iommu group.  Once the reference count is zero, the group is released.
 560 */
 561void iommu_group_put(struct iommu_group *group)
 562{
 563        if (group)
 564                kobject_put(group->devices_kobj);
 565}
 566EXPORT_SYMBOL_GPL(iommu_group_put);
 567
 568/**
 569 * iommu_group_register_notifier - Register a notifier for group changes
 570 * @group: the group to watch
 571 * @nb: notifier block to signal
 572 *
 573 * This function allows iommu group users to track changes in a group.
 574 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 575 * should hold a reference to the group throughout notifier registration.
 576 */
 577int iommu_group_register_notifier(struct iommu_group *group,
 578                                  struct notifier_block *nb)
 579{
 580        return blocking_notifier_chain_register(&group->notifier, nb);
 581}
 582EXPORT_SYMBOL_GPL(iommu_group_register_notifier);
 583
 584/**
 585 * iommu_group_unregister_notifier - Unregister a notifier
 586 * @group: the group to watch
 587 * @nb: notifier block to signal
 588 *
 589 * Unregister a previously registered group notifier block.
 590 */
 591int iommu_group_unregister_notifier(struct iommu_group *group,
 592                                    struct notifier_block *nb)
 593{
 594        return blocking_notifier_chain_unregister(&group->notifier, nb);
 595}
 596EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
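
/*
 * Example (illustrative sketch only): a group user such as VFIO watching
 * for devices being added to a group.  The notifier block, helper name and
 * message are made up for illustration.
 */
static int example_group_notifier(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct device *dev = data;

	if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
		dev_info(dev, "added to an iommu group\n");

	return NOTIFY_OK;
}

static struct notifier_block example_group_nb = {
	.notifier_call = example_group_notifier,
};

static int __maybe_unused example_watch_group(struct iommu_group *group)
{
	return iommu_group_register_notifier(group, &example_group_nb);
}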
 597
 598/**
 599 * iommu_group_id - Return ID for a group
 600 * @group: the group to ID
 601 *
 602 * Return the unique ID for the group matching the sysfs group number.
 603 */
 604int iommu_group_id(struct iommu_group *group)
 605{
 606        return group->id;
 607}
 608EXPORT_SYMBOL_GPL(iommu_group_id);
 609
 610static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
 611                                               unsigned long *devfns);
 612
 613/*
 614 * To consider a PCI device isolated, we require ACS to support Source
 615 * Validation, Request Redirection, Completer Redirection, and Upstream
 616 * Forwarding.  This effectively means that devices cannot spoof their
 617 * requester ID, requests and completions cannot be redirected, and all
  618 * transactions are forwarded upstream, even when they pass through a
  619 * bridge where the target device is downstream.
 620 */
 621#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)
 622
 623/*
 624 * For multifunction devices which are not isolated from each other, find
 625 * all the other non-isolated functions and look for existing groups.  For
 626 * each function, we also need to look for aliases to or from other devices
 627 * that may already have a group.
 628 */
 629static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
 630                                                        unsigned long *devfns)
 631{
 632        struct pci_dev *tmp = NULL;
 633        struct iommu_group *group;
 634
 635        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
 636                return NULL;
 637
 638        for_each_pci_dev(tmp) {
 639                if (tmp == pdev || tmp->bus != pdev->bus ||
 640                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
 641                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
 642                        continue;
 643
 644                group = get_pci_alias_group(tmp, devfns);
 645                if (group) {
 646                        pci_dev_put(tmp);
 647                        return group;
 648                }
 649        }
 650
 651        return NULL;
 652}
 653
 654/*
 655 * Look for aliases to or from the given device for existing groups. DMA
 656 * aliases are only supported on the same bus, therefore the search
  657 * space is quite small (especially since we're really only looking at PCIe
  658 * devices, and therefore only expect multiple slots on the root complex or
 659 * downstream switch ports).  It's conceivable though that a pair of
 660 * multifunction devices could have aliases between them that would cause a
 661 * loop.  To prevent this, we use a bitmap to track where we've been.
 662 */
 663static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
 664                                               unsigned long *devfns)
 665{
 666        struct pci_dev *tmp = NULL;
 667        struct iommu_group *group;
 668
 669        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
 670                return NULL;
 671
 672        group = iommu_group_get(&pdev->dev);
 673        if (group)
 674                return group;
 675
 676        for_each_pci_dev(tmp) {
 677                if (tmp == pdev || tmp->bus != pdev->bus)
 678                        continue;
 679
 680                /* We alias them or they alias us */
 681                if (pci_devs_are_dma_aliases(pdev, tmp)) {
 682                        group = get_pci_alias_group(tmp, devfns);
 683                        if (group) {
 684                                pci_dev_put(tmp);
 685                                return group;
 686                        }
 687
 688                        group = get_pci_function_alias_group(tmp, devfns);
 689                        if (group) {
 690                                pci_dev_put(tmp);
 691                                return group;
 692                        }
 693                }
 694        }
 695
 696        return NULL;
 697}
 698
 699struct group_for_pci_data {
 700        struct pci_dev *pdev;
 701        struct iommu_group *group;
 702};
 703
 704/*
 705 * DMA alias iterator callback, return the last seen device.  Stop and return
 706 * the IOMMU group if we find one along the way.
 707 */
 708static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
 709{
 710        struct group_for_pci_data *data = opaque;
 711
 712        data->pdev = pdev;
 713        data->group = iommu_group_get(&pdev->dev);
 714
 715        return data->group != NULL;
 716}
 717
 718/*
 719 * Generic device_group call-back function. It just allocates one
 720 * iommu-group per device.
 721 */
 722struct iommu_group *generic_device_group(struct device *dev)
 723{
 724        struct iommu_group *group;
 725
 726        group = iommu_group_alloc();
 727        if (IS_ERR(group))
 728                return NULL;
 729
 730        return group;
 731}
 732
 733/*
 734 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 735 * to find or create an IOMMU group for a device.
 736 */
 737struct iommu_group *pci_device_group(struct device *dev)
 738{
 739        struct pci_dev *pdev = to_pci_dev(dev);
 740        struct group_for_pci_data data;
 741        struct pci_bus *bus;
 742        struct iommu_group *group = NULL;
 743        u64 devfns[4] = { 0 };
 744
 745        if (WARN_ON(!dev_is_pci(dev)))
 746                return ERR_PTR(-EINVAL);
 747
 748        /*
 749         * Find the upstream DMA alias for the device.  A device must not
 750         * be aliased due to topology in order to have its own IOMMU group.
 751         * If we find an alias along the way that already belongs to a
 752         * group, use it.
 753         */
 754        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
 755                return data.group;
 756
 757        pdev = data.pdev;
 758
 759        /*
 760         * Continue upstream from the point of minimum IOMMU granularity
 761         * due to aliases to the point where devices are protected from
 762         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
 763         * group, use it.
 764         */
 765        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
 766                if (!bus->self)
 767                        continue;
 768
 769                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
 770                        break;
 771
 772                pdev = bus->self;
 773
 774                group = iommu_group_get(&pdev->dev);
 775                if (group)
 776                        return group;
 777        }
 778
 779        /*
 780         * Look for existing groups on device aliases.  If we alias another
 781         * device or another device aliases us, use the same group.
 782         */
 783        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
 784        if (group)
 785                return group;
 786
 787        /*
 788         * Look for existing groups on non-isolated functions on the same
  789 * slot and aliases of those functions, if any.  No need to clear
 790         * the search bitmap, the tested devfns are still valid.
 791         */
 792        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
 793        if (group)
 794                return group;
 795
 796        /* No shared group found, allocate new */
 797        group = iommu_group_alloc();
 798        if (IS_ERR(group))
 799                return NULL;
 800
 801        return group;
 802}
 803
 804/**
 805 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 806 * @dev: target device
 807 *
 808 * This function is intended to be called by IOMMU drivers and extended to
 809 * support common, bus-defined algorithms when determining or creating the
 810 * IOMMU group for a device.  On success, the caller will hold a reference
 811 * to the returned IOMMU group, which will already include the provided
 812 * device.  The reference should be released with iommu_group_put().
 813 */
 814struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 815{
 816        const struct iommu_ops *ops = dev->bus->iommu_ops;
 817        struct iommu_group *group;
 818        int ret;
 819
 820        group = iommu_group_get(dev);
 821        if (group)
 822                return group;
 823
 824        group = ERR_PTR(-EINVAL);
 825
 826        if (ops && ops->device_group)
 827                group = ops->device_group(dev);
 828
 829        if (IS_ERR(group))
 830                return group;
 831
 832        /*
 833         * Try to allocate a default domain - needs support from the
 834         * IOMMU driver.
 835         */
 836        if (!group->default_domain) {
 837                group->default_domain = __iommu_domain_alloc(dev->bus,
 838                                                             IOMMU_DOMAIN_DMA);
 839                if (!group->domain)
 840                        group->domain = group->default_domain;
 841        }
 842
 843        ret = iommu_group_add_device(group, dev);
 844        if (ret) {
 845                iommu_group_put(group);
 846                return ERR_PTR(ret);
 847        }
 848
 849        return group;
 850}
 851
 852struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
 853{
 854        return group->default_domain;
 855}
 856
 857static int add_iommu_group(struct device *dev, void *data)
 858{
 859        struct iommu_callback_data *cb = data;
 860        const struct iommu_ops *ops = cb->ops;
 861        int ret;
 862
 863        if (!ops->add_device)
 864                return 0;
 865
 866        WARN_ON(dev->iommu_group);
 867
 868        ret = ops->add_device(dev);
 869
 870        /*
 871         * We ignore -ENODEV errors for now, as they just mean that the
 872         * device is not translated by an IOMMU. We still care about
 873         * other errors and fail to initialize when they happen.
 874         */
 875        if (ret == -ENODEV)
 876                ret = 0;
 877
 878        return ret;
 879}
 880
 881static int remove_iommu_group(struct device *dev, void *data)
 882{
 883        struct iommu_callback_data *cb = data;
 884        const struct iommu_ops *ops = cb->ops;
 885
 886        if (ops->remove_device && dev->iommu_group)
 887                ops->remove_device(dev);
 888
 889        return 0;
 890}
 891
 892static int iommu_bus_notifier(struct notifier_block *nb,
 893                              unsigned long action, void *data)
 894{
 895        struct device *dev = data;
 896        const struct iommu_ops *ops = dev->bus->iommu_ops;
 897        struct iommu_group *group;
 898        unsigned long group_action = 0;
 899
 900        /*
 901         * ADD/DEL call into iommu driver ops if provided, which may
 902         * result in ADD/DEL notifiers to group->notifier
 903         */
 904        if (action == BUS_NOTIFY_ADD_DEVICE) {
 905                if (ops->add_device)
 906                        return ops->add_device(dev);
 907        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
 908                if (ops->remove_device && dev->iommu_group) {
 909                        ops->remove_device(dev);
 910                        return 0;
 911                }
 912        }
 913
 914        /*
 915         * Remaining BUS_NOTIFYs get filtered and republished to the
 916         * group, if anyone is listening
 917         */
 918        group = iommu_group_get(dev);
 919        if (!group)
 920                return 0;
 921
 922        switch (action) {
 923        case BUS_NOTIFY_BIND_DRIVER:
 924                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
 925                break;
 926        case BUS_NOTIFY_BOUND_DRIVER:
 927                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
 928                break;
 929        case BUS_NOTIFY_UNBIND_DRIVER:
 930                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
 931                break;
 932        case BUS_NOTIFY_UNBOUND_DRIVER:
 933                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
 934                break;
 935        }
 936
 937        if (group_action)
 938                blocking_notifier_call_chain(&group->notifier,
 939                                             group_action, dev);
 940
 941        iommu_group_put(group);
 942        return 0;
 943}
 944
 945static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
 946{
 947        int err;
 948        struct notifier_block *nb;
 949        struct iommu_callback_data cb = {
 950                .ops = ops,
 951        };
 952
 953        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
 954        if (!nb)
 955                return -ENOMEM;
 956
 957        nb->notifier_call = iommu_bus_notifier;
 958
 959        err = bus_register_notifier(bus, nb);
 960        if (err)
 961                goto out_free;
 962
 963        err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
 964        if (err)
 965                goto out_err;
 966
 967
 968        return 0;
 969
 970out_err:
 971        /* Clean up */
 972        bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
 973        bus_unregister_notifier(bus, nb);
 974
 975out_free:
 976        kfree(nb);
 977
 978        return err;
 979}
 980
 981/**
 982 * bus_set_iommu - set iommu-callbacks for the bus
 983 * @bus: bus.
 984 * @ops: the callbacks provided by the iommu-driver
 985 *
 986 * This function is called by an iommu driver to set the iommu methods
 987 * used for a particular bus. Drivers for devices on that bus can use
 988 * the iommu-api after these ops are registered.
 989 * This special function is needed because IOMMUs are usually devices on
 990 * the bus itself, so the iommu drivers are not initialized when the bus
 991 * is set up. With this function the iommu-driver can set the iommu-ops
 992 * afterwards.
 993 */
 994int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 995{
 996        int err;
 997
 998        if (bus->iommu_ops != NULL)
 999                return -EBUSY;
1000
1001        bus->iommu_ops = ops;
1002
1003        /* Do IOMMU specific setup for this bus-type */
1004        err = iommu_bus_init(bus, ops);
1005        if (err)
1006                bus->iommu_ops = NULL;
1007
1008        return err;
1009}
1010EXPORT_SYMBOL_GPL(bus_set_iommu);
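
/*
 * Example (illustrative sketch only): an IOMMU driver registering its ops
 * for a bus (e.g. &pci_bus_type) once its hardware has been probed.
 * "example_ops" stands in for the driver's own const struct iommu_ops.
 */
static int __maybe_unused example_register_iommu(struct bus_type *bus,
						 const struct iommu_ops *example_ops)
{
	if (iommu_present(bus))
		return -EBUSY;	/* another IOMMU driver already owns this bus */

	return bus_set_iommu(bus, example_ops);
}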
1011
1012bool iommu_present(struct bus_type *bus)
1013{
1014        return bus->iommu_ops != NULL;
1015}
1016EXPORT_SYMBOL_GPL(iommu_present);
1017
1018bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
1019{
1020        if (!bus->iommu_ops || !bus->iommu_ops->capable)
1021                return false;
1022
1023        return bus->iommu_ops->capable(cap);
1024}
1025EXPORT_SYMBOL_GPL(iommu_capable);
1026
1027/**
1028 * iommu_set_fault_handler() - set a fault handler for an iommu domain
1029 * @domain: iommu domain
1030 * @handler: fault handler
1031 * @token: user data, will be passed back to the fault handler
1032 *
1033 * This function should be used by IOMMU users which want to be notified
1034 * whenever an IOMMU fault happens.
1035 *
1036 * The fault handler itself should return 0 on success, and an appropriate
1037 * error code otherwise.
1038 */
1039void iommu_set_fault_handler(struct iommu_domain *domain,
1040                                        iommu_fault_handler_t handler,
1041                                        void *token)
1042{
1043        BUG_ON(!domain);
1044
1045        domain->handler = handler;
1046        domain->handler_token = token;
1047}
1048EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
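
/*
 * Example (illustrative sketch only): a minimal report-only fault handler.
 * It would be installed with iommu_set_fault_handler(domain,
 * example_iommu_fault, NULL); returning a negative value tells the IOMMU
 * driver the fault was not handled and its default policy should apply.
 */
static int __maybe_unused example_iommu_fault(struct iommu_domain *domain,
					      struct device *dev,
					      unsigned long iova, int flags,
					      void *token)
{
	dev_err(dev, "unhandled fault at iova 0x%lx (flags 0x%x)\n",
		iova, flags);

	return -ENOSYS;
}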
1049
1050static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
1051                                                 unsigned type)
1052{
1053        struct iommu_domain *domain;
1054
1055        if (bus == NULL || bus->iommu_ops == NULL)
1056                return NULL;
1057
1058        domain = bus->iommu_ops->domain_alloc(type);
1059        if (!domain)
1060                return NULL;
1061
1062        domain->ops  = bus->iommu_ops;
1063        domain->type = type;
1064        /* Assume all sizes by default; the driver may override this later */
1065        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
1066
1067        return domain;
1068}
1069
1070struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
1071{
1072        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
1073}
1074EXPORT_SYMBOL_GPL(iommu_domain_alloc);
1075
1076void iommu_domain_free(struct iommu_domain *domain)
1077{
1078        domain->ops->domain_free(domain);
1079}
1080EXPORT_SYMBOL_GPL(iommu_domain_free);
1081
1082static int __iommu_attach_device(struct iommu_domain *domain,
1083                                 struct device *dev)
1084{
1085        int ret;
1086        if (unlikely(domain->ops->attach_dev == NULL))
1087                return -ENODEV;
1088
1089        ret = domain->ops->attach_dev(domain, dev);
1090        if (!ret)
1091                trace_attach_device_to_domain(dev);
1092        return ret;
1093}
1094
1095int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
1096{
1097        struct iommu_group *group;
1098        int ret;
1099
1100        group = iommu_group_get(dev);
 1101        /* FIXME: Remove this when groups are mandatory for iommu drivers */
1102        if (group == NULL)
1103                return __iommu_attach_device(domain, dev);
1104
1105        /*
1106         * We have a group - lock it to make sure the device-count doesn't
1107         * change while we are attaching
1108         */
1109        mutex_lock(&group->mutex);
1110        ret = -EINVAL;
1111        if (iommu_group_device_count(group) != 1)
1112                goto out_unlock;
1113
1114        ret = __iommu_attach_group(domain, group);
1115
1116out_unlock:
1117        mutex_unlock(&group->mutex);
1118        iommu_group_put(group);
1119
1120        return ret;
1121}
1122EXPORT_SYMBOL_GPL(iommu_attach_device);
1123
1124static void __iommu_detach_device(struct iommu_domain *domain,
1125                                  struct device *dev)
1126{
1127        if (unlikely(domain->ops->detach_dev == NULL))
1128                return;
1129
1130        domain->ops->detach_dev(domain, dev);
1131        trace_detach_device_from_domain(dev);
1132}
1133
1134void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
1135{
1136        struct iommu_group *group;
1137
1138        group = iommu_group_get(dev);
 1139        /* FIXME: Remove this when groups are mandatory for iommu drivers */
1140        if (group == NULL)
1141                return __iommu_detach_device(domain, dev);
1142
1143        mutex_lock(&group->mutex);
1144        if (iommu_group_device_count(group) != 1) {
1145                WARN_ON(1);
1146                goto out_unlock;
1147        }
1148
1149        __iommu_detach_group(domain, group);
1150
1151out_unlock:
1152        mutex_unlock(&group->mutex);
1153        iommu_group_put(group);
1154}
1155EXPORT_SYMBOL_GPL(iommu_detach_device);
1156
1157struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
1158{
1159        struct iommu_domain *domain;
1160        struct iommu_group *group;
1161
1162        group = iommu_group_get(dev);
 1163        /* FIXME: Remove this when groups are mandatory for iommu drivers */
1164        if (group == NULL)
1165                return NULL;
1166
1167        domain = group->domain;
1168
1169        iommu_group_put(group);
1170
1171        return domain;
1172}
1173EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);
1174
1175/*
 1176 * IOMMU groups are really the natural working unit of the IOMMU, but
 1177 * the IOMMU API works on domains and devices.  Bridge that gap by
 1178 * iterating over the devices in a group.  Ideally we'd have a single
 1179 * device which represents the requestor ID of the group, but we also
 1180 * allow IOMMU drivers to create policy-defined minimum sets, where
 1181 * the physical hardware may be able to distinguish members, but we
1182 * wish to group them at a higher level (ex. untrusted multi-function
1183 * PCI devices).  Thus we attach each device.
1184 */
1185static int iommu_group_do_attach_device(struct device *dev, void *data)
1186{
1187        struct iommu_domain *domain = data;
1188
1189        return __iommu_attach_device(domain, dev);
1190}
1191
1192static int __iommu_attach_group(struct iommu_domain *domain,
1193                                struct iommu_group *group)
1194{
1195        int ret;
1196
1197        if (group->default_domain && group->domain != group->default_domain)
1198                return -EBUSY;
1199
1200        ret = __iommu_group_for_each_dev(group, domain,
1201                                         iommu_group_do_attach_device);
1202        if (ret == 0)
1203                group->domain = domain;
1204
1205        return ret;
1206}
1207
1208int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
1209{
1210        int ret;
1211
1212        mutex_lock(&group->mutex);
1213        ret = __iommu_attach_group(domain, group);
1214        mutex_unlock(&group->mutex);
1215
1216        return ret;
1217}
1218EXPORT_SYMBOL_GPL(iommu_attach_group);
1219
1220static int iommu_group_do_detach_device(struct device *dev, void *data)
1221{
1222        struct iommu_domain *domain = data;
1223
1224        __iommu_detach_device(domain, dev);
1225
1226        return 0;
1227}
1228
1229static void __iommu_detach_group(struct iommu_domain *domain,
1230                                 struct iommu_group *group)
1231{
1232        int ret;
1233
1234        if (!group->default_domain) {
1235                __iommu_group_for_each_dev(group, domain,
1236                                           iommu_group_do_detach_device);
1237                group->domain = NULL;
1238                return;
1239        }
1240
1241        if (group->domain == group->default_domain)
1242                return;
1243
1244        /* Detach by re-attaching to the default domain */
1245        ret = __iommu_group_for_each_dev(group, group->default_domain,
1246                                         iommu_group_do_attach_device);
1247        if (ret != 0)
1248                WARN_ON(1);
1249        else
1250                group->domain = group->default_domain;
1251}
1252
1253void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
1254{
1255        mutex_lock(&group->mutex);
1256        __iommu_detach_group(domain, group);
1257        mutex_unlock(&group->mutex);
1258}
1259EXPORT_SYMBOL_GPL(iommu_detach_group);
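
/*
 * Example (illustrative sketch only): a group-level user (e.g. VFIO) taking
 * exclusive ownership of all devices in a group and handing them back to
 * the default domain afterwards.  Attach fails with -EBUSY if the group is
 * not currently on its default domain.
 */
static int __maybe_unused example_take_over_group(struct iommu_domain *domain,
						  struct iommu_group *group)
{
	int ret;

	ret = iommu_attach_group(domain, group);
	if (ret)
		return ret;

	/* ... set up mappings and let the user drive DMA ... */

	iommu_detach_group(domain, group);

	return 0;
}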
1260
1261phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
1262{
1263        if (unlikely(domain->ops->iova_to_phys == NULL))
1264                return 0;
1265
1266        return domain->ops->iova_to_phys(domain, iova);
1267}
1268EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
1269
1270static size_t iommu_pgsize(struct iommu_domain *domain,
1271                           unsigned long addr_merge, size_t size)
1272{
1273        unsigned int pgsize_idx;
1274        size_t pgsize;
1275
1276        /* Max page size that still fits into 'size' */
1277        pgsize_idx = __fls(size);
1278
 1279        /* need to consider alignment requirements? */
1280        if (likely(addr_merge)) {
1281                /* Max page size allowed by address */
1282                unsigned int align_pgsize_idx = __ffs(addr_merge);
1283                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
1284        }
1285
1286        /* build a mask of acceptable page sizes */
1287        pgsize = (1UL << (pgsize_idx + 1)) - 1;
1288
1289        /* throw away page sizes not supported by the hardware */
1290        pgsize &= domain->pgsize_bitmap;
1291
1292        /* make sure we're still sane */
1293        BUG_ON(!pgsize);
1294
1295        /* pick the biggest page */
1296        pgsize_idx = __fls(pgsize);
1297        pgsize = 1UL << pgsize_idx;
1298
1299        return pgsize;
1300}
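
/*
 * Worked example (illustrative values): for addr_merge = iova | paddr =
 * 0x201000, size = 0x11000 and a pgsize_bitmap of 4KiB | 2MiB, __fls(size)
 * would allow up to a 64KiB page, but the 4KiB alignment of addr_merge caps
 * the candidate page size at 4KiB; masking with the bitmap leaves only the
 * 4KiB bit, so the mapping loop below proceeds one 4KiB page at a time.
 */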
1301
1302int iommu_map(struct iommu_domain *domain, unsigned long iova,
1303              phys_addr_t paddr, size_t size, int prot)
1304{
1305        unsigned long orig_iova = iova;
1306        unsigned int min_pagesz;
1307        size_t orig_size = size;
1308        phys_addr_t orig_paddr = paddr;
1309        int ret = 0;
1310
1311        if (unlikely(domain->ops->map == NULL ||
1312                     domain->pgsize_bitmap == 0UL))
1313                return -ENODEV;
1314
1315        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1316                return -EINVAL;
1317
1318        /* find out the minimum page size supported */
1319        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1320
1321        /*
1322         * both the virtual address and the physical one, as well as
1323         * the size of the mapping, must be aligned (at least) to the
1324         * size of the smallest page supported by the hardware
1325         */
1326        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
1327                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
1328                       iova, &paddr, size, min_pagesz);
1329                return -EINVAL;
1330        }
1331
1332        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
1333
1334        while (size) {
1335                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
1336
1337                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
1338                         iova, &paddr, pgsize);
1339
1340                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
1341                if (ret)
1342                        break;
1343
1344                iova += pgsize;
1345                paddr += pgsize;
1346                size -= pgsize;
1347        }
1348
1349        /* unroll mapping in case something went wrong */
1350        if (ret)
1351                iommu_unmap(domain, orig_iova, orig_size - size);
1352        else
1353                trace_map(orig_iova, orig_paddr, orig_size);
1354
1355        return ret;
1356}
1357EXPORT_SYMBOL_GPL(iommu_map);
1358
1359size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
1360{
1361        size_t unmapped_page, unmapped = 0;
1362        unsigned int min_pagesz;
1363        unsigned long orig_iova = iova;
1364
1365        if (unlikely(domain->ops->unmap == NULL ||
1366                     domain->pgsize_bitmap == 0UL))
1367                return -ENODEV;
1368
1369        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
1370                return -EINVAL;
1371
1372        /* find out the minimum page size supported */
1373        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1374
1375        /*
1376         * The virtual address, as well as the size of the mapping, must be
1377         * aligned (at least) to the size of the smallest page supported
1378         * by the hardware
1379         */
1380        if (!IS_ALIGNED(iova | size, min_pagesz)) {
1381                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
1382                       iova, size, min_pagesz);
1383                return -EINVAL;
1384        }
1385
1386        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);
1387
1388        /*
1389         * Keep iterating until we either unmap 'size' bytes (or more)
1390         * or we hit an area that isn't mapped.
1391         */
1392        while (unmapped < size) {
1393                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
1394
1395                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
1396                if (!unmapped_page)
1397                        break;
1398
1399                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
1400                         iova, unmapped_page);
1401
1402                iova += unmapped_page;
1403                unmapped += unmapped_page;
1404        }
1405
1406        trace_unmap(orig_iova, size, unmapped);
1407        return unmapped;
1408}
1409EXPORT_SYMBOL_GPL(iommu_unmap);
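
/*
 * Example (illustrative sketch only): the typical life cycle of an
 * unmanaged domain as used by callers that own their own IOVA space.
 * Addresses, size and protection flags are placeholders; the caller must
 * keep them aligned to the domain's minimum page size.
 */
static int __maybe_unused example_map_one_page(struct device *dev,
					       unsigned long iova,
					       phys_addr_t paddr)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	ret = iommu_map(domain, iova, paddr, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	/* ... the device can now DMA to [iova, iova + PAGE_SIZE) ... */

	iommu_unmap(domain, iova, PAGE_SIZE);
out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}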
1410
1411size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1412                         struct scatterlist *sg, unsigned int nents, int prot)
1413{
1414        struct scatterlist *s;
1415        size_t mapped = 0;
1416        unsigned int i, min_pagesz;
1417        int ret;
1418
1419        if (unlikely(domain->pgsize_bitmap == 0UL))
1420                return 0;
1421
1422        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);
1423
1424        for_each_sg(sg, s, nents, i) {
1425                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
1426
1427                /*
1428                 * We are mapping on IOMMU page boundaries, so offset within
1429                 * the page must be 0. However, the IOMMU may support pages
1430                 * smaller than PAGE_SIZE, so s->offset may still represent
1431                 * an offset of that boundary within the CPU page.
1432                 */
1433                if (!IS_ALIGNED(s->offset, min_pagesz))
1434                        goto out_err;
1435
1436                ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
1437                if (ret)
1438                        goto out_err;
1439
1440                mapped += s->length;
1441        }
1442
1443        return mapped;
1444
1445out_err:
1446        /* undo mappings already done */
1447        iommu_unmap(domain, iova, mapped);
1448
1449        return 0;
1450
1451}
1452EXPORT_SYMBOL_GPL(default_iommu_map_sg);
1453
1454int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
1455                               phys_addr_t paddr, u64 size, int prot)
1456{
1457        if (unlikely(domain->ops->domain_window_enable == NULL))
1458                return -ENODEV;
1459
1460        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
1461                                                 prot);
1462}
1463EXPORT_SYMBOL_GPL(iommu_domain_window_enable);
1464
1465void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
1466{
1467        if (unlikely(domain->ops->domain_window_disable == NULL))
1468                return;
1469
1470        return domain->ops->domain_window_disable(domain, wnd_nr);
1471}
1472EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
1473
1474static int __init iommu_init(void)
1475{
1476        iommu_group_kset = kset_create_and_add("iommu_groups",
1477                                               NULL, kernel_kobj);
1478        BUG_ON(!iommu_group_kset);
1479
1480        return 0;
1481}
1482core_initcall(iommu_init);
1483
1484int iommu_domain_get_attr(struct iommu_domain *domain,
1485                          enum iommu_attr attr, void *data)
1486{
1487        struct iommu_domain_geometry *geometry;
1488        bool *paging;
1489        int ret = 0;
1490        u32 *count;
1491
1492        switch (attr) {
1493        case DOMAIN_ATTR_GEOMETRY:
1494                geometry  = data;
1495                *geometry = domain->geometry;
1496
1497                break;
1498        case DOMAIN_ATTR_PAGING:
1499                paging  = data;
1500                *paging = (domain->pgsize_bitmap != 0UL);
1501                break;
1502        case DOMAIN_ATTR_WINDOWS:
1503                count = data;
1504
1505                if (domain->ops->domain_get_windows != NULL)
1506                        *count = domain->ops->domain_get_windows(domain);
1507                else
1508                        ret = -ENODEV;
1509
1510                break;
1511        default:
1512                if (!domain->ops->domain_get_attr)
1513                        return -EINVAL;
1514
1515                ret = domain->ops->domain_get_attr(domain, attr, data);
1516        }
1517
1518        return ret;
1519}
1520EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
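
/*
 * Example (illustrative sketch only): querying the aperture of a domain via
 * DOMAIN_ATTR_GEOMETRY before choosing IOVAs to map.
 */
static int __maybe_unused example_query_aperture(struct iommu_domain *domain,
						 dma_addr_t *start,
						 dma_addr_t *end)
{
	struct iommu_domain_geometry geo;
	int ret;

	ret = iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo);
	if (ret)
		return ret;

	*start = geo.aperture_start;
	*end   = geo.aperture_end;

	return 0;
}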
1521
1522int iommu_domain_set_attr(struct iommu_domain *domain,
1523                          enum iommu_attr attr, void *data)
1524{
1525        int ret = 0;
1526        u32 *count;
1527
1528        switch (attr) {
1529        case DOMAIN_ATTR_WINDOWS:
1530                count = data;
1531
1532                if (domain->ops->domain_set_windows != NULL)
1533                        ret = domain->ops->domain_set_windows(domain, *count);
1534                else
1535                        ret = -ENODEV;
1536
1537                break;
1538        default:
1539                if (domain->ops->domain_set_attr == NULL)
1540                        return -EINVAL;
1541
1542                ret = domain->ops->domain_set_attr(domain, attr, data);
1543        }
1544
1545        return ret;
1546}
1547EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
1548
1549void iommu_get_dm_regions(struct device *dev, struct list_head *list)
1550{
1551        const struct iommu_ops *ops = dev->bus->iommu_ops;
1552
1553        if (ops && ops->get_dm_regions)
1554                ops->get_dm_regions(dev, list);
1555}
1556
1557void iommu_put_dm_regions(struct device *dev, struct list_head *list)
1558{
1559        const struct iommu_ops *ops = dev->bus->iommu_ops;
1560
1561        if (ops && ops->put_dm_regions)
1562                ops->put_dm_regions(dev, list);
1563}
1564
1565/* Request that a device is direct mapped by the IOMMU */
1566int iommu_request_dm_for_dev(struct device *dev)
1567{
1568        struct iommu_domain *dm_domain;
1569        struct iommu_group *group;
1570        int ret;
1571
1572        /* Device must already be in a group before calling this function */
1573        group = iommu_group_get_for_dev(dev);
1574        if (IS_ERR(group))
1575                return PTR_ERR(group);
1576
1577        mutex_lock(&group->mutex);
1578
1579        /* Check if the default domain is already direct mapped */
1580        ret = 0;
1581        if (group->default_domain &&
1582            group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
1583                goto out;
1584
1585        /* Don't change mappings of existing devices */
1586        ret = -EBUSY;
1587        if (iommu_group_device_count(group) != 1)
1588                goto out;
1589
1590        /* Allocate a direct mapped domain */
1591        ret = -ENOMEM;
1592        dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
1593        if (!dm_domain)
1594                goto out;
1595
1596        /* Attach the device to the domain */
1597        ret = __iommu_attach_group(dm_domain, group);
1598        if (ret) {
1599                iommu_domain_free(dm_domain);
1600                goto out;
1601        }
1602
1603        /* Make the direct mapped domain the default for this group */
1604        if (group->default_domain)
1605                iommu_domain_free(group->default_domain);
1606        group->default_domain = dm_domain;
1607
1608        pr_info("Using direct mapping for device %s\n", dev_name(dev));
1609
1610        ret = 0;
1611out:
1612        mutex_unlock(&group->mutex);
1613        iommu_group_put(group);
1614
1615        return ret;
1616}
1617
1618int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
1619                      const struct iommu_ops *ops)
1620{
1621        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1622
1623        if (fwspec)
1624                return ops == fwspec->ops ? 0 : -EINVAL;
1625
1626        fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
1627        if (!fwspec)
1628                return -ENOMEM;
1629
1630        of_node_get(to_of_node(iommu_fwnode));
1631        fwspec->iommu_fwnode = iommu_fwnode;
1632        fwspec->ops = ops;
1633        dev->iommu_fwspec = fwspec;
1634        return 0;
1635}
1636EXPORT_SYMBOL_GPL(iommu_fwspec_init);
1637
1638void iommu_fwspec_free(struct device *dev)
1639{
1640        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1641
1642        if (fwspec) {
1643                fwnode_handle_put(fwspec->iommu_fwnode);
1644                kfree(fwspec);
1645                dev->iommu_fwspec = NULL;
1646        }
1647}
1648EXPORT_SYMBOL_GPL(iommu_fwspec_free);
1649
1650int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
1651{
1652        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1653        size_t size;
1654        int i;
1655
1656        if (!fwspec)
1657                return -EINVAL;
1658
1659        size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
1660        if (size > sizeof(*fwspec)) {
1661                fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
1662                if (!fwspec)
1663                        return -ENOMEM;
1664        }
1665
1666        for (i = 0; i < num_ids; i++)
1667                fwspec->ids[fwspec->num_ids + i] = ids[i];
1668
1669        fwspec->num_ids += num_ids;
1670        dev->iommu_fwspec = fwspec;
1671        return 0;
1672}
1673EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
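
/*
 * Example (illustrative sketch only): how a bus/firmware layer (e.g. the OF
 * or ACPI IORT code) might describe a master to its IOMMU driver.  The
 * single stream ID passed in is a placeholder.
 */
static int __maybe_unused example_describe_master(struct device *dev,
						  struct fwnode_handle *iommu_fwnode,
						  const struct iommu_ops *ops,
						  u32 stream_id)
{
	int ret;

	ret = iommu_fwspec_init(dev, iommu_fwnode, ops);
	if (ret)
		return ret;

	ret = iommu_fwspec_add_ids(dev, &stream_id, 1);
	if (ret)
		iommu_fwspec_free(dev);

	return ret;
}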
1674