linux/drivers/iommu/iommu.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <jroedel@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "iommu: " fmt

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/property.h>
#include <trace/events/iommu.h>

static struct kset *iommu_group_kset;
static DEFINE_IDA(iommu_group_ida);

struct iommu_callback_data {
        const struct iommu_ops *ops;
};

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
        struct iommu_domain *default_domain;
        struct iommu_domain *domain;
};

struct iommu_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group);

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        pr_debug("Releasing group %d\n", group->id);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        ida_simple_remove(&iommu_group_ida, group->id);

        if (group->default_domain)
                iommu_domain_free(group->default_domain);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the returned
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
        if (ret < 0) {
                kfree(group);
                return ERR_PTR(ret);
        }
        group->id = ret;

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                ida_simple_remove(&iommu_group_ida, group->id);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        pr_debug("Allocated group %d\n", group->id);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
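
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * roughly how an iommu driver might tie the group API together.  The
 * helper name my_alloc_group() and the name "my-unit" are hypothetical.
 *
 *      static struct iommu_group *my_alloc_group(struct device *dev)
 *      {
 *              struct iommu_group *group;
 *
 *              group = iommu_group_alloc();
 *              if (IS_ERR(group))
 *                      return group;
 *
 *              iommu_group_set_name(group, "my-unit");
 *              if (iommu_group_add_device(group, dev)) {
 *                      iommu_group_put(group);
 *                      return ERR_PTR(-ENODEV);
 *              }
 *
 *              return group;
 *      }
 *
 * The caller eventually drops the initial reference with iommu_group_put().
 */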

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
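
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * attaching driver-private data to a group.  The release callback runs
 * from iommu_group_release() when the last group reference is dropped.
 * struct my_group_info and my_release() are hypothetical names.
 *
 *      struct my_group_info {
 *              int stream_id;
 *      };
 *
 *      static void my_release(void *data)
 *      {
 *              kfree(data);
 *      }
 *
 * Then, with a group reference held:
 *
 *      struct my_group_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
 *
 *      if (info) {
 *              info->stream_id = 42;
 *              iommu_group_set_iommudata(group, info, my_release);
 *      }
 */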

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

static int iommu_group_create_direct_mappings(struct iommu_group *group,
                                              struct device *dev)
{
        struct iommu_domain *domain = group->default_domain;
        struct iommu_dm_region *entry;
        struct list_head mappings;
        unsigned long pg_size;
        int ret = 0;

        if (!domain || domain->type != IOMMU_DOMAIN_DMA)
                return 0;

        BUG_ON(!domain->pgsize_bitmap);

        pg_size = 1UL << __ffs(domain->pgsize_bitmap);
        INIT_LIST_HEAD(&mappings);

        iommu_get_dm_regions(dev, &mappings);

        /* We need to consider overlapping regions for different devices */
        list_for_each_entry(entry, &mappings, list) {
                dma_addr_t start, end, addr;

                if (domain->ops->apply_dm_region)
                        domain->ops->apply_dm_region(dev, domain, entry);

                start = ALIGN(entry->start, pg_size);
                end   = ALIGN(entry->start + entry->length, pg_size);

                for (addr = start; addr < end; addr += pg_size) {
                        phys_addr_t phys_addr;

                        phys_addr = iommu_iova_to_phys(domain, addr);
                        if (phys_addr)
                                continue;

                        ret = iommu_map(domain, addr, addr, pg_size, entry->prot);
                        if (ret)
                                goto out;
                }
        }

out:
        iommu_put_dm_regions(dev, &mappings);

        return ret;
}

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct iommu_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret) {
                kfree(device);
                return ret;
        }

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return -ENOMEM;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                kfree(device->name);
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }

                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return ret;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        iommu_group_create_direct_mappings(group, dev);

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        if (group->domain)
                __iommu_attach_device(group->domain, dev);
        mutex_unlock(&group->mutex);

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);

        trace_add_device_to_group(group->id, dev);

        pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct iommu_device *tmp_device, *device = NULL;

        pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        trace_remove_device_from_group(group->id, dev);

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

static int iommu_group_device_count(struct iommu_group *group)
{
        struct iommu_device *entry;
        int ret = 0;

        list_for_each_entry(entry, &group->devices, list)
                ret++;

        return ret;
}

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
                                      int (*fn)(struct device *, void *))
{
        struct iommu_device *device;
        int ret = 0;

        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        return ret;
}

int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_group_for_each_dev(group, data, fn);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
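
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a hypothetical callback that counts the devices in a group.
 *
 *      static int my_count_one(struct device *dev, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *
 *      iommu_group_for_each_dev(group, &count, my_count_one);
 *
 * Returning non-zero from the callback stops the iteration early, and
 * that value is propagated back to the caller.
 */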

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_ref_get - Increment reference on a group
 * @group: the group to use, must not be NULL
 *
 * This function is called by iommu drivers to take additional references on an
 * existing group.  Returns the given group for convenience.
 */
struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
{
        kobject_get(group->devices_kobj);
        return group;
}

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);
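
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the usual reference pattern around these helpers.  While the
 * reference from iommu_group_get() is held, the group cannot go away.
 *
 *      struct iommu_group *group = iommu_group_get(dev);
 *
 *      if (group) {
 *              ... use the group ...
 *              iommu_group_put(group);
 *      }
 */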

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
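
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * how a group user might watch device membership.  my_notify() and
 * my_nb are hypothetical names.
 *
 *      static int my_notify(struct notifier_block *nb,
 *                           unsigned long action, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *                      dev_info(dev, "added to iommu group\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_notify,
 *      };
 *
 *      iommu_group_register_notifier(group, &my_nb);
 *      ...
 *      iommu_group_unregister_notifier(group, &my_nb);
 */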

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns);

/*
 * To consider a PCI device isolated, we require ACS to support Source
 * Validation, Request Redirection, Completer Redirection, and Upstream
 * Forwarding.  This effectively means that devices cannot spoof their
 * requester ID, requests and completions cannot be redirected, and all
 * transactions are forwarded upstream, even as they pass through a
 * bridge where the target device is downstream.
 */
#define REQ_ACS_FLAGS   (PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF)

/*
 * For multifunction devices which are not isolated from each other, find
 * all the other non-isolated functions and look for existing groups.  For
 * each function, we also need to look for aliases to or from other devices
 * that may already have a group.
 */
static struct iommu_group *get_pci_function_alias_group(struct pci_dev *pdev,
                                                        unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (!pdev->multifunction || pci_acs_enabled(pdev, REQ_ACS_FLAGS))
                return NULL;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus ||
                    PCI_SLOT(tmp->devfn) != PCI_SLOT(pdev->devfn) ||
                    pci_acs_enabled(tmp, REQ_ACS_FLAGS))
                        continue;

                group = get_pci_alias_group(tmp, devfns);
                if (group) {
                        pci_dev_put(tmp);
                        return group;
                }
        }

        return NULL;
}

/*
 * Look for aliases to or from the given device for existing groups. DMA
 * aliases are only supported on the same bus, therefore the search
 * space is quite small (especially since we're really only looking at PCIe
 * devices, and therefore only expect multiple slots on the root complex or
 * downstream switch ports).  It's conceivable though that a pair of
 * multifunction devices could have aliases between them that would cause a
 * loop.  To prevent this, we use a bitmap to track where we've been.
 */
static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev,
                                               unsigned long *devfns)
{
        struct pci_dev *tmp = NULL;
        struct iommu_group *group;

        if (test_and_set_bit(pdev->devfn & 0xff, devfns))
                return NULL;

        group = iommu_group_get(&pdev->dev);
        if (group)
                return group;

        for_each_pci_dev(tmp) {
                if (tmp == pdev || tmp->bus != pdev->bus)
                        continue;

                /* We alias them or they alias us */
                if (pci_devs_are_dma_aliases(pdev, tmp)) {
                        group = get_pci_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }

                        group = get_pci_function_alias_group(tmp, devfns);
                        if (group) {
                                pci_dev_put(tmp);
                                return group;
                        }
                }
        }

        return NULL;
}

struct group_for_pci_data {
        struct pci_dev *pdev;
        struct iommu_group *group;
};

/*
 * DMA alias iterator callback, return the last seen device.  Stop and return
 * the IOMMU group if we find one along the way.
 */
static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque)
{
        struct group_for_pci_data *data = opaque;

        data->pdev = pdev;
        data->group = iommu_group_get(&pdev->dev);

        return data->group != NULL;
}

/*
 * Generic device_group call-back function. It just allocates one
 * iommu-group per device.
 */
struct iommu_group *generic_device_group(struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_alloc();
        if (IS_ERR(group))
                return NULL;

        return group;
}

/*
 * Use standard PCI bus topology, isolation features, and DMA alias quirks
 * to find or create an IOMMU group for a device.
 */
struct iommu_group *pci_device_group(struct device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev);
        struct group_for_pci_data data;
        struct pci_bus *bus;
        struct iommu_group *group = NULL;
        u64 devfns[4] = { 0 };

        if (WARN_ON(!dev_is_pci(dev)))
                return ERR_PTR(-EINVAL);

        /*
         * Find the upstream DMA alias for the device.  A device must not
         * be aliased due to topology in order to have its own IOMMU group.
         * If we find an alias along the way that already belongs to a
         * group, use it.
         */
        if (pci_for_each_dma_alias(pdev, get_pci_alias_or_group, &data))
                return data.group;

        pdev = data.pdev;

        /*
         * Continue upstream from the point of minimum IOMMU granularity
         * due to aliases to the point where devices are protected from
         * peer-to-peer DMA by PCI ACS.  Again, if we find an existing
         * group, use it.
         */
        for (bus = pdev->bus; !pci_is_root_bus(bus); bus = bus->parent) {
                if (!bus->self)
                        continue;

                if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS))
                        break;

                pdev = bus->self;

                group = iommu_group_get(&pdev->dev);
                if (group)
                        return group;
        }

        /*
         * Look for existing groups on device aliases.  If we alias another
         * device or another device aliases us, use the same group.
         */
        group = get_pci_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /*
         * Look for existing groups on non-isolated functions on the same
         * slot and aliases of those functions, if any.  No need to clear
         * the search bitmap, the tested devfns are still valid.
         */
        group = get_pci_function_alias_group(pdev, (unsigned long *)devfns);
        if (group)
                return group;

        /* No shared group found, allocate new */
        group = iommu_group_alloc();
        if (IS_ERR(group))
                return NULL;

        return group;
}

/**
 * iommu_group_get_for_dev - Find or create the IOMMU group for a device
 * @dev: target device
 *
 * This function is intended to be called by IOMMU drivers and extended to
 * support common, bus-defined algorithms when determining or creating the
 * IOMMU group for a device.  On success, the caller will hold a reference
 * to the returned IOMMU group, which will already include the provided
 * device.  The reference should be released with iommu_group_put().
 */
struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        if (group)
                return group;

        group = ERR_PTR(-EINVAL);

        if (ops && ops->device_group)
                group = ops->device_group(dev);

        if (IS_ERR(group))
                return group;

        /*
         * Try to allocate a default domain - needs support from the
         * IOMMU driver.
         */
        if (!group->default_domain) {
                group->default_domain = __iommu_domain_alloc(dev->bus,
                                                             IOMMU_DOMAIN_DMA);
                if (!group->domain)
                        group->domain = group->default_domain;
        }

        ret = iommu_group_add_device(group, dev);
        if (ret) {
                iommu_group_put(group);
                return ERR_PTR(ret);
        }

        return group;
}
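
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * an IOMMU driver's add_device callback typically defers group lookup
 * or creation to iommu_group_get_for_dev() and drops the reference once
 * its own setup is done.  my_add_device() is hypothetical.
 *
 *      static int my_add_device(struct device *dev)
 *      {
 *              struct iommu_group *group;
 *
 *              group = iommu_group_get_for_dev(dev);
 *              if (IS_ERR(group))
 *                      return PTR_ERR(group);
 *
 *              ... per-device setup ...
 *
 *              iommu_group_put(group);
 *              return 0;
 *      }
 */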

struct iommu_domain *iommu_group_default_domain(struct iommu_group *group)
{
        return group->default_domain;
}

static int add_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;
        int ret;

        if (!ops->add_device)
                return 0;

        WARN_ON(dev->iommu_group);

        ret = ops->add_device(dev);

        /*
         * We ignore -ENODEV errors for now, as they just mean that the
         * device is not translated by an IOMMU. We still care about
         * other errors and fail to initialize when they happen.
         */
        if (ret == -ENODEV)
                ret = 0;

        return ret;
}

static int remove_iommu_group(struct device *dev, void *data)
{
        struct iommu_callback_data *cb = data;
        const struct iommu_ops *ops = cb->ops;

        if (ops->remove_device && dev->iommu_group)
                ops->remove_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;
        const struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        unsigned long group_action = 0;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
        } else if (action == BUS_NOTIFY_REMOVED_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
                }
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;
        struct notifier_block *nb;
        struct iommu_callback_data cb = {
                .ops = ops,
        };

        nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        nb->notifier_call = iommu_bus_notifier;

        err = bus_register_notifier(bus, nb);
        if (err)
                goto out_free;

        err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group);
        if (err)
                goto out_err;

        return 0;

out_err:
        /* Clean up */
        bus_for_each_dev(bus, NULL, &cb, remove_iommu_group);
        bus_unregister_notifier(bus, nb);

out_free:
        kfree(nb);

        return err;
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
{
        int err;

        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        err = iommu_bus_init(bus, ops);
        if (err)
                bus->iommu_ops = NULL;

        return err;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
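
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a driver registers its ops once per bus type, usually from an
 * initcall.  my_iommu_ops and the my_*() callbacks are hypothetical;
 * the pgsize_bitmap shown advertises 4K, 2M and 1G pages.
 *
 *      static const struct iommu_ops my_iommu_ops = {
 *              .capable        = my_capable,
 *              .domain_alloc   = my_domain_alloc,
 *              .domain_free    = my_domain_free,
 *              .attach_dev     = my_attach_dev,
 *              .detach_dev     = my_detach_dev,
 *              .map            = my_map,
 *              .unmap          = my_unmap,
 *              .map_sg         = default_iommu_map_sg,
 *              .iova_to_phys   = my_iova_to_phys,
 *              .add_device     = my_add_device,
 *              .remove_device  = my_remove_device,
 *              .device_group   = pci_device_group,
 *              .pgsize_bitmap  = SZ_4K | SZ_2M | SZ_1G,
 *      };
 *
 *      bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */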

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

bool iommu_capable(struct bus_type *bus, enum iommu_cap cap)
{
        if (!bus->iommu_ops || !bus->iommu_ops->capable)
                return false;

        return bus->iommu_ops->capable(cap);
}
EXPORT_SYMBOL_GPL(iommu_capable);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler,
                                        void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
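
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a minimal fault handler.  The exact return convention is between the
 * user and the driver, but returning -ENOSYS is commonly used to say
 * "not handled".  my_fault() is a hypothetical name.
 *
 *      static int my_fault(struct iommu_domain *domain, struct device *dev,
 *                          unsigned long iova, int flags, void *token)
 *      {
 *              dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault, NULL);
 */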

static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
                                                 unsigned type)
{
        struct iommu_domain *domain;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = bus->iommu_ops->domain_alloc(type);
        if (!domain)
                return NULL;

        domain->ops  = bus->iommu_ops;
        domain->type = type;
        /* Assume all sizes by default; the driver may override this later */
        domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;

        return domain;
}

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        return __iommu_domain_alloc(bus, IOMMU_DOMAIN_UNMANAGED);
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

static int __iommu_attach_device(struct iommu_domain *domain,
                                 struct device *dev)
{
        int ret;

        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        ret = domain->ops->attach_dev(domain, dev);
        if (!ret)
                trace_attach_device_to_domain(dev);
        return ret;
}

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;
        int ret;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return __iommu_attach_device(domain, dev);

        /*
         * We have a group - lock it to make sure the device-count doesn't
         * change while we are attaching
         */
        mutex_lock(&group->mutex);
        ret = -EINVAL;
        if (iommu_group_device_count(group) != 1)
                goto out_unlock;

        ret = __iommu_attach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_device);
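
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * the classic unmanaged-domain lifecycle (VFIO/KVM-style users):
 * allocate, attach, map, then tear down in reverse order.  iova and
 * paddr are assumed to be page-aligned values owned by the caller.
 *
 *      struct iommu_domain *domain = iommu_domain_alloc(&pci_bus_type);
 *
 *      if (domain && !iommu_attach_device(domain, dev)) {
 *              iommu_map(domain, iova, paddr, SZ_4K,
 *                        IOMMU_READ | IOMMU_WRITE);
 *              ...
 *              iommu_unmap(domain, iova, SZ_4K);
 *              iommu_detach_device(domain, dev);
 *      }
 *      if (domain)
 *              iommu_domain_free(domain);
 */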

static void __iommu_detach_device(struct iommu_domain *domain,
                                  struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
        trace_detach_device_from_domain(dev);
}

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        struct iommu_group *group;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return __iommu_detach_device(domain, dev);

        mutex_lock(&group->mutex);
        if (iommu_group_device_count(group) != 1) {
                WARN_ON(1);
                goto out_unlock;
        }

        __iommu_detach_group(domain, group);

out_unlock:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);

struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
        struct iommu_domain *domain;
        struct iommu_group *group;

        group = iommu_group_get(dev);
        /* FIXME: Remove this when groups are mandatory for iommu drivers */
        if (group == NULL)
                return NULL;

        domain = group->domain;

        iommu_group_put(group);

        return domain;
}
EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev);

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requester ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return __iommu_attach_device(domain, dev);
}

static int __iommu_attach_group(struct iommu_domain *domain,
                                struct iommu_group *group)
{
        int ret;

        if (group->default_domain && group->domain != group->default_domain)
                return -EBUSY;

        ret = __iommu_group_for_each_dev(group, domain,
                                         iommu_group_do_attach_device);
        if (ret == 0)
                group->domain = domain;

        return ret;
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        int ret;

        mutex_lock(&group->mutex);
        ret = __iommu_attach_group(domain, group);
        mutex_unlock(&group->mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        __iommu_detach_device(domain, dev);

        return 0;
}

static void __iommu_detach_group(struct iommu_domain *domain,
                                 struct iommu_group *group)
{
        int ret;

        if (!group->default_domain) {
                __iommu_group_for_each_dev(group, domain,
                                           iommu_group_do_detach_device);
                group->domain = NULL;
                return;
        }

        if (group->domain == group->default_domain)
                return;

        /* Detach by re-attaching to the default domain */
        ret = __iommu_group_for_each_dev(group, group->default_domain,
                                         iommu_group_do_attach_device);
        if (ret != 0)
                WARN_ON(1);
        else
                group->domain = group->default_domain;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        mutex_lock(&group->mutex);
        __iommu_detach_group(domain, group);
        mutex_unlock(&group->mutex);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);

                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= domain->pgsize_bitmap;

        /* make sure we're still sane */
        BUG_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}
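
/*
 * Editor's note -- a worked example for iommu_pgsize(), not part of the
 * original file.  With pgsize_bitmap = SZ_4K | SZ_2M | SZ_1G,
 * addr_merge = 0x201000 and size = 0x400000:
 *
 *      __fls(size) = 22, so sizes up to 4MB would fit;
 *      __ffs(addr_merge) = 12, so alignment caps the index at 12;
 *      the mask (1UL << 13) - 1 = 0x1fff ANDed with the bitmap leaves
 *      only bit 12, hence pgsize = 4KB for this iteration.
 */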

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        phys_addr_t orig_paddr = paddr;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);
        else
                trace_map(orig_iova, orig_paddr, orig_size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;
        unsigned long orig_iova = iova;

        if (unlikely(domain->ops->unmap == NULL ||
                     domain->pgsize_bitmap == 0UL))
                return -ENODEV;

        if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
                return -EINVAL;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        trace_unmap(orig_iova, size, unmapped);
        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
                         struct scatterlist *sg, unsigned int nents, int prot)
{
        struct scatterlist *s;
        size_t mapped = 0;
        unsigned int i, min_pagesz;
        int ret;

        if (unlikely(domain->pgsize_bitmap == 0UL))
                return 0;

        min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

        for_each_sg(sg, s, nents, i) {
                phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

                /*
                 * We are mapping on IOMMU page boundaries, so offset within
                 * the page must be 0. However, the IOMMU may support pages
                 * smaller than PAGE_SIZE, so s->offset may still represent
                 * an offset of that boundary within the CPU page.
                 */
                if (!IS_ALIGNED(s->offset, min_pagesz))
                        goto out_err;

                ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
                if (ret)
                        goto out_err;

                mapped += s->length;
        }

        return mapped;

out_err:
        /* undo mappings already done */
        iommu_unmap(domain, iova, mapped);

        return 0;
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
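
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * drivers without a hardware scatter-gather fast path simply plug this
 * helper into their ops table (my_iommu_ops is hypothetical):
 *
 *      static const struct iommu_ops my_iommu_ops = {
 *              ...
 *              .map    = my_map,
 *              .unmap  = my_unmap,
 *              .map_sg = default_iommu_map_sg,
 *      };
 */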

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        return domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        BUG_ON(!iommu_group_kset);

        return 0;
}
core_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry  = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging  = data;
                *paging = (domain->pgsize_bitmap != 0UL);
                break;
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_get_windows != NULL)
                        *count = domain->ops->domain_get_windows(domain);
                else
                        ret = -ENODEV;

                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_set_windows != NULL)
                        ret = domain->ops->domain_set_windows(domain, *count);
                else
                        ret = -ENODEV;

                break;
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);

void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->get_dm_regions)
                ops->get_dm_regions(dev, list);
}

void iommu_put_dm_regions(struct device *dev, struct list_head *list)
{
        const struct iommu_ops *ops = dev->bus->iommu_ops;

        if (ops && ops->put_dm_regions)
                ops->put_dm_regions(dev, list);
}

/* Request that a device is direct mapped by the IOMMU */
int iommu_request_dm_for_dev(struct device *dev)
{
        struct iommu_domain *dm_domain;
        struct iommu_group *group;
        int ret;

        /* Device must already be in a group before calling this function */
        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        mutex_lock(&group->mutex);

        /* Check if the default domain is already direct mapped */
        ret = 0;
        if (group->default_domain &&
            group->default_domain->type == IOMMU_DOMAIN_IDENTITY)
                goto out;

        /* Don't change mappings of existing devices */
        ret = -EBUSY;
        if (iommu_group_device_count(group) != 1)
                goto out;

        /* Allocate a direct mapped domain */
        ret = -ENOMEM;
        dm_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_IDENTITY);
        if (!dm_domain)
                goto out;

        /* Attach the device to the domain */
        ret = __iommu_attach_group(dm_domain, group);
        if (ret) {
                iommu_domain_free(dm_domain);
                goto out;
        }

        /* Make the direct mapped domain the default for this group */
        if (group->default_domain)
                iommu_domain_free(group->default_domain);
        group->default_domain = dm_domain;

        pr_info("Using direct mapping for device %s\n", dev_name(dev));

        ret = 0;
out:
        mutex_unlock(&group->mutex);
        iommu_group_put(group);

        return ret;
}

struct iommu_instance {
        struct list_head list;
        struct fwnode_handle *fwnode;
        const struct iommu_ops *ops;
};
static LIST_HEAD(iommu_instance_list);
static DEFINE_SPINLOCK(iommu_instance_lock);

void iommu_register_instance(struct fwnode_handle *fwnode,
                             const struct iommu_ops *ops)
{
        struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);

        if (WARN_ON(!iommu))
                return;

        of_node_get(to_of_node(fwnode));
        INIT_LIST_HEAD(&iommu->list);
        iommu->fwnode = fwnode;
        iommu->ops = ops;
        spin_lock(&iommu_instance_lock);
        list_add_tail(&iommu->list, &iommu_instance_list);
        spin_unlock(&iommu_instance_lock);
}

const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
{
        struct iommu_instance *instance;
        const struct iommu_ops *ops = NULL;

        spin_lock(&iommu_instance_lock);
        list_for_each_entry(instance, &iommu_instance_list, list)
                if (instance->fwnode == fwnode) {
                        ops = instance->ops;
                        break;
                }
        spin_unlock(&iommu_instance_lock);
        return ops;
}

int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
                      const struct iommu_ops *ops)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec)
                return ops == fwspec->ops ? 0 : -EINVAL;

        fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
        if (!fwspec)
                return -ENOMEM;

        of_node_get(to_of_node(iommu_fwnode));
        fwspec->iommu_fwnode = iommu_fwnode;
        fwspec->ops = ops;
        dev->iommu_fwspec = fwspec;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_init);

void iommu_fwspec_free(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        if (fwspec) {
                fwnode_handle_put(fwspec->iommu_fwnode);
                kfree(fwspec);
                dev->iommu_fwspec = NULL;
        }
}
EXPORT_SYMBOL_GPL(iommu_fwspec_free);

int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;
        size_t size;
        int i;

        if (!fwspec)
                return -EINVAL;

        size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
        if (size > sizeof(*fwspec)) {
                fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
                if (!fwspec)
                        return -ENOMEM;
        }

        for (i = 0; i < num_ids; i++)
                fwspec->ids[fwspec->num_ids + i] = ids[i];

        fwspec->num_ids += num_ids;
        dev->iommu_fwspec = fwspec;
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
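
/*
 * Editor's note -- illustrative sketch, not part of the original file:
 * a firmware probe path (for instance an of_xlate callback) typically
 * builds a device's fwspec in two steps.  my_ops and the one-cell ID
 * encoding are assumptions.
 *
 *      static int my_of_xlate(struct device *dev,
 *                             struct of_phandle_args *args)
 *      {
 *              u32 id = args->args[0];
 *              int ret;
 *
 *              ret = iommu_fwspec_init(dev, &args->np->fwnode, &my_ops);
 *              if (ret)
 *                      return ret;
 *
 *              return iommu_fwspec_add_ids(dev, &id, 1);
 *      }
 */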