linux/drivers/iommu/iommu.c
/*
 * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#define pr_fmt(fmt)    "%s: " fmt, __func__

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/err.h>

static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;

struct iommu_group {
        struct kobject kobj;
        struct kobject *devices_kobj;
        struct list_head devices;
        struct mutex mutex;
        struct blocking_notifier_head notifier;
        void *iommu_data;
        void (*iommu_data_release)(void *iommu_data);
        char *name;
        int id;
};

struct iommu_device {
        struct list_head list;
        struct device *dev;
        char *name;
};

struct iommu_group_attribute {
        struct attribute attr;
        ssize_t (*show)(struct iommu_group *group, char *buf);
        ssize_t (*store)(struct iommu_group *group,
                         const char *buf, size_t count);
};

#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store)           \
struct iommu_group_attribute iommu_group_attr_##_name =         \
        __ATTR(_name, _mode, _show, _store)

#define to_iommu_group_attr(_attr)      \
        container_of(_attr, struct iommu_group_attribute, attr)
#define to_iommu_group(_kobj)           \
        container_of(_kobj, struct iommu_group, kobj)

static ssize_t iommu_group_attr_show(struct kobject *kobj,
                                     struct attribute *__attr, char *buf)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->show)
                ret = attr->show(group, buf);
        return ret;
}

static ssize_t iommu_group_attr_store(struct kobject *kobj,
                                      struct attribute *__attr,
                                      const char *buf, size_t count)
{
        struct iommu_group_attribute *attr = to_iommu_group_attr(__attr);
        struct iommu_group *group = to_iommu_group(kobj);
        ssize_t ret = -EIO;

        if (attr->store)
                ret = attr->store(group, buf, count);
        return ret;
}

static const struct sysfs_ops iommu_group_sysfs_ops = {
        .show = iommu_group_attr_show,
        .store = iommu_group_attr_store,
};

static int iommu_group_create_file(struct iommu_group *group,
                                   struct iommu_group_attribute *attr)
{
        return sysfs_create_file(&group->kobj, &attr->attr);
}

static void iommu_group_remove_file(struct iommu_group *group,
                                    struct iommu_group_attribute *attr)
{
        sysfs_remove_file(&group->kobj, &attr->attr);
}

static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
{
        return sprintf(buf, "%s\n", group->name);
}

static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);

static void iommu_group_release(struct kobject *kobj)
{
        struct iommu_group *group = to_iommu_group(kobj);

        if (group->iommu_data_release)
                group->iommu_data_release(group->iommu_data);

        mutex_lock(&iommu_group_mutex);
        ida_remove(&iommu_group_ida, group->id);
        mutex_unlock(&iommu_group_mutex);

        kfree(group->name);
        kfree(group);
}

static struct kobj_type iommu_group_ktype = {
        .sysfs_ops = &iommu_group_sysfs_ops,
        .release = iommu_group_release,
};

/**
 * iommu_group_alloc - Allocate a new group
 *
 * This function is called by an iommu driver to allocate a new iommu
 * group.  The iommu group represents the minimum granularity of the iommu.
 * Upon successful return, the caller holds a reference to the supplied
 * group in order to hold the group until devices are added.  Use
 * iommu_group_put() to release this extra reference count, allowing the
 * group to be automatically reclaimed once it has no devices or external
 * references.
 */
struct iommu_group *iommu_group_alloc(void)
{
        struct iommu_group *group;
        int ret;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->kobj.kset = iommu_group_kset;
        mutex_init(&group->mutex);
        INIT_LIST_HEAD(&group->devices);
        BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);

        mutex_lock(&iommu_group_mutex);

again:
        if (unlikely(0 == ida_pre_get(&iommu_group_ida, GFP_KERNEL))) {
                kfree(group);
                mutex_unlock(&iommu_group_mutex);
                return ERR_PTR(-ENOMEM);
        }

        if (-EAGAIN == ida_get_new(&iommu_group_ida, &group->id))
                goto again;

        mutex_unlock(&iommu_group_mutex);

        ret = kobject_init_and_add(&group->kobj, &iommu_group_ktype,
                                   NULL, "%d", group->id);
        if (ret) {
                mutex_lock(&iommu_group_mutex);
                ida_remove(&iommu_group_ida, group->id);
                mutex_unlock(&iommu_group_mutex);
                kfree(group);
                return ERR_PTR(ret);
        }

        group->devices_kobj = kobject_create_and_add("devices", &group->kobj);
        if (!group->devices_kobj) {
                kobject_put(&group->kobj); /* triggers .release & free */
                return ERR_PTR(-ENOMEM);
        }

        /*
         * The devices_kobj holds a reference on the group kobject, so
         * as long as that exists so will the group.  We can therefore
         * use the devices_kobj for reference counting.
         */
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_alloc);
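
/*
 * Example (illustrative sketch added for this listing, not part of the
 * kernel source): an iommu driver typically allocates a group, names it,
 * and keeps its initial reference until devices have been added.  The
 * my_setup_group() wrapper and the "my" name are placeholders.
 *
 *      static struct iommu_group *my_setup_group(void)
 *      {
 *              struct iommu_group *group = iommu_group_alloc();
 *              int ret;
 *
 *              if (IS_ERR(group))
 *                      return group;
 *
 *              ret = iommu_group_set_name(group, "my");
 *              if (ret) {
 *                      iommu_group_put(group);
 *                      return ERR_PTR(ret);
 *              }
 *
 *              return group;
 *      }
 */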

struct iommu_group *iommu_group_get_by_id(int id)
{
        struct kobject *group_kobj;
        struct iommu_group *group;
        const char *name;

        if (!iommu_group_kset)
                return NULL;

        name = kasprintf(GFP_KERNEL, "%d", id);
        if (!name)
                return NULL;

        group_kobj = kset_find_obj(iommu_group_kset, name);
        kfree(name);

        if (!group_kobj)
                return NULL;

        group = container_of(group_kobj, struct iommu_group, kobj);
        BUG_ON(group->id != id);

        kobject_get(group->devices_kobj);
        kobject_put(&group->kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get_by_id);

/**
 * iommu_group_get_iommudata - retrieve iommu_data registered for a group
 * @group: the group
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to retrieve it.  Caller
 * should hold a group reference.
 */
void *iommu_group_get_iommudata(struct iommu_group *group)
{
        return group->iommu_data;
}
EXPORT_SYMBOL_GPL(iommu_group_get_iommudata);

/**
 * iommu_group_set_iommudata - set iommu_data for a group
 * @group: the group
 * @iommu_data: new data
 * @release: release function for iommu_data
 *
 * iommu drivers can store data in the group for use when doing iommu
 * operations.  This function provides a way to set the data after
 * the group has been allocated.  Caller should hold a group reference.
 */
void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
                               void (*release)(void *iommu_data))
{
        group->iommu_data = iommu_data;
        group->iommu_data_release = release;
}
EXPORT_SYMBOL_GPL(iommu_group_set_iommudata);
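
/*
 * Example (illustrative sketch, not part of the kernel source): a driver
 * can hang per-group state off the group and let the release callback
 * free it when the group goes away.  struct my_group_data and
 * my_group_data_release() are placeholders.
 *
 *      struct my_group_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
 *
 *      if (!data)
 *              return -ENOMEM;
 *      iommu_group_set_iommudata(group, data, my_group_data_release);
 *
 *      data = iommu_group_get_iommudata(group);
 */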

/**
 * iommu_group_set_name - set name for a group
 * @group: the group
 * @name: name
 *
 * Allow iommu driver to set a name for a group.  When set it will
 * appear in a name attribute file under the group in sysfs.
 */
int iommu_group_set_name(struct iommu_group *group, const char *name)
{
        int ret;

        if (group->name) {
                iommu_group_remove_file(group, &iommu_group_attr_name);
                kfree(group->name);
                group->name = NULL;
                if (!name)
                        return 0;
        }

        group->name = kstrdup(name, GFP_KERNEL);
        if (!group->name)
                return -ENOMEM;

        ret = iommu_group_create_file(group, &iommu_group_attr_name);
        if (ret) {
                kfree(group->name);
                group->name = NULL;
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_set_name);

/**
 * iommu_group_add_device - add a device to an iommu group
 * @group: the group into which to add the device (reference should be held)
 * @dev: the device
 *
 * This function is called by an iommu driver to add a device into a
 * group.  Adding a device increments the group reference count.
 */
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
        int ret, i = 0;
        struct iommu_device *device;

        device = kzalloc(sizeof(*device), GFP_KERNEL);
        if (!device)
                return -ENOMEM;

        device->dev = dev;

        ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
        if (ret) {
                kfree(device);
                return ret;
        }

        device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
        if (!device->name) {
                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return -ENOMEM;
        }

        ret = sysfs_create_link_nowarn(group->devices_kobj,
                                       &dev->kobj, device->name);
        if (ret) {
                kfree(device->name);
                if (ret == -EEXIST && i >= 0) {
                        /*
                         * Account for the slim chance of collision
                         * and append an instance to the name.
                         */
                        device->name = kasprintf(GFP_KERNEL, "%s.%d",
                                                 kobject_name(&dev->kobj), i++);
                        goto rename;
                }

                sysfs_remove_link(&dev->kobj, "iommu_group");
                kfree(device);
                return ret;
        }

        kobject_get(group->devices_kobj);

        dev->iommu_group = group;

        mutex_lock(&group->mutex);
        list_add_tail(&device->list, &group->devices);
        mutex_unlock(&group->mutex);

        /* Notify any listeners about change to group. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_ADD_DEVICE, dev);
        return 0;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
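
/*
 * Example (illustrative sketch, not part of the kernel source): a typical
 * iommu_ops->add_device() implementation reuses an existing group when it
 * can, otherwise allocates one, then adds the device and drops its own
 * group reference.  my_find_peer_group() is a placeholder for the driver's
 * bus-specific isolation logic.
 *
 *      static int my_add_device(struct device *dev)
 *      {
 *              struct iommu_group *group = my_find_peer_group(dev);
 *              int ret;
 *
 *              if (!group) {
 *                      group = iommu_group_alloc();
 *                      if (IS_ERR(group))
 *                              return PTR_ERR(group);
 *              }
 *
 *              ret = iommu_group_add_device(group, dev);
 *              iommu_group_put(group);
 *              return ret;
 *      }
 */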

/**
 * iommu_group_remove_device - remove a device from its current group
 * @dev: device to be removed
 *
 * This function is called by an iommu driver to remove the device from
 * its current group.  This decrements the iommu group reference count.
 */
void iommu_group_remove_device(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;
        struct iommu_device *tmp_device, *device = NULL;

        /* Pre-notify listeners that a device is being removed. */
        blocking_notifier_call_chain(&group->notifier,
                                     IOMMU_GROUP_NOTIFY_DEL_DEVICE, dev);

        mutex_lock(&group->mutex);
        list_for_each_entry(tmp_device, &group->devices, list) {
                if (tmp_device->dev == dev) {
                        device = tmp_device;
                        list_del(&device->list);
                        break;
                }
        }
        mutex_unlock(&group->mutex);

        if (!device)
                return;

        sysfs_remove_link(group->devices_kobj, device->name);
        sysfs_remove_link(&dev->kobj, "iommu_group");

        kfree(device->name);
        kfree(device);
        dev->iommu_group = NULL;
        kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_remove_device);

/**
 * iommu_group_for_each_dev - iterate over each device in the group
 * @group: the group
 * @data: caller opaque data to be passed to callback function
 * @fn: caller supplied callback function
 *
 * This function is called by group users to iterate over group devices.
 * Callers should hold a reference count to the group during callback.
 * The group->mutex is held across callbacks, which will block calls to
 * iommu_group_add/remove_device.
 */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
                             int (*fn)(struct device *, void *))
{
        struct iommu_device *device;
        int ret = 0;

        mutex_lock(&group->mutex);
        list_for_each_entry(device, &group->devices, list) {
                ret = fn(device->dev, data);
                if (ret)
                        break;
        }
        mutex_unlock(&group->mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_for_each_dev);
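
/*
 * Example (illustrative sketch, not part of the kernel source): counting
 * the devices in a group with a trivial callback.  The callback runs
 * under group->mutex, so it must not call iommu_group_add_device() or
 * iommu_group_remove_device() itself.
 *
 *      static int my_count_dev(struct device *dev, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *
 *      iommu_group_for_each_dev(group, &count, my_count_dev);
 */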

/**
 * iommu_group_get - Return the group for a device and increment reference
 * @dev: get the group that this device belongs to
 *
 * This function is called by iommu drivers and users to get the group
 * for the specified device.  If found, the group is returned and the group
 * reference is incremented, else NULL.
 */
struct iommu_group *iommu_group_get(struct device *dev)
{
        struct iommu_group *group = dev->iommu_group;

        if (group)
                kobject_get(group->devices_kobj);

        return group;
}
EXPORT_SYMBOL_GPL(iommu_group_get);

/**
 * iommu_group_put - Decrement group reference
 * @group: the group to use
 *
 * This function is called by iommu drivers and users to release the
 * iommu group.  Once the reference count is zero, the group is released.
 */
void iommu_group_put(struct iommu_group *group)
{
        if (group)
                kobject_put(group->devices_kobj);
}
EXPORT_SYMBOL_GPL(iommu_group_put);

/**
 * iommu_group_register_notifier - Register a notifier for group changes
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * This function allows iommu group users to track changes in a group.
 * See include/linux/iommu.h for actions sent via this notifier.  Caller
 * should hold a reference to the group throughout notifier registration.
 */
int iommu_group_register_notifier(struct iommu_group *group,
                                  struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_register_notifier);

/**
 * iommu_group_unregister_notifier - Unregister a notifier
 * @group: the group to watch
 * @nb: notifier block to signal
 *
 * Unregister a previously registered group notifier block.
 */
int iommu_group_unregister_notifier(struct iommu_group *group,
                                    struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&group->notifier, nb);
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
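
/*
 * Example (illustrative sketch, not part of the kernel source): watching
 * a group for device additions.  The actions are the IOMMU_GROUP_NOTIFY_*
 * values from include/linux/iommu.h; my_group_notify() is a placeholder.
 *
 *      static int my_group_notify(struct notifier_block *nb,
 *                                 unsigned long action, void *data)
 *      {
 *              struct device *dev = data;
 *
 *              if (action == IOMMU_GROUP_NOTIFY_ADD_DEVICE)
 *                      dev_info(dev, "joined iommu group\n");
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_group_notify,
 *      };
 *
 *      iommu_group_register_notifier(group, &my_nb);
 */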

/**
 * iommu_group_id - Return ID for a group
 * @group: the group to ID
 *
 * Return the unique ID for the group matching the sysfs group number.
 */
int iommu_group_id(struct iommu_group *group)
{
        return group->id;
}
EXPORT_SYMBOL_GPL(iommu_group_id);

static int add_iommu_group(struct device *dev, void *data)
{
        struct iommu_ops *ops = data;

        if (!ops->add_device)
                return -ENODEV;

        WARN_ON(dev->iommu_group);

        ops->add_device(dev);

        return 0;
}

static int iommu_bus_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct device *dev = data;
        struct iommu_ops *ops = dev->bus->iommu_ops;
        struct iommu_group *group;
        unsigned long group_action = 0;

        /*
         * ADD/DEL call into iommu driver ops if provided, which may
         * result in ADD/DEL notifiers to group->notifier
         */
        if (action == BUS_NOTIFY_ADD_DEVICE) {
                if (ops->add_device)
                        return ops->add_device(dev);
        } else if (action == BUS_NOTIFY_DEL_DEVICE) {
                if (ops->remove_device && dev->iommu_group) {
                        ops->remove_device(dev);
                        return 0;
                }
        }

        /*
         * Remaining BUS_NOTIFYs get filtered and republished to the
         * group, if anyone is listening
         */
        group = iommu_group_get(dev);
        if (!group)
                return 0;

        switch (action) {
        case BUS_NOTIFY_BIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BIND_DRIVER;
                break;
        case BUS_NOTIFY_BOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_BOUND_DRIVER;
                break;
        case BUS_NOTIFY_UNBIND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBIND_DRIVER;
                break;
        case BUS_NOTIFY_UNBOUND_DRIVER:
                group_action = IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER;
                break;
        }

        if (group_action)
                blocking_notifier_call_chain(&group->notifier,
                                             group_action, dev);

        iommu_group_put(group);
        return 0;
}

static struct notifier_block iommu_bus_nb = {
        .notifier_call = iommu_bus_notifier,
};

static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
        bus_register_notifier(bus, &iommu_bus_nb);
        bus_for_each_dev(bus, NULL, ops, add_iommu_group);
}

/**
 * bus_set_iommu - set iommu-callbacks for the bus
 * @bus: bus.
 * @ops: the callbacks provided by the iommu-driver
 *
 * This function is called by an iommu driver to set the iommu methods
 * used for a particular bus. Drivers for devices on that bus can use
 * the iommu-api after these ops are registered.
 * This special function is needed because IOMMUs are usually devices on
 * the bus itself, so the iommu drivers are not initialized when the bus
 * is set up. With this function the iommu-driver can set the iommu-ops
 * afterwards.
 */
int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops)
{
        if (bus->iommu_ops != NULL)
                return -EBUSY;

        bus->iommu_ops = ops;

        /* Do IOMMU specific setup for this bus-type */
        iommu_bus_init(bus, ops);

        return 0;
}
EXPORT_SYMBOL_GPL(bus_set_iommu);
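
/*
 * Example (illustrative sketch, not part of the kernel source): an iommu
 * driver registers its ops for a bus once its own hardware has probed,
 * e.g. for PCI.  my_iommu_ops and its callbacks are placeholders, and
 * only a subset of the struct iommu_ops members is shown.
 *
 *      static struct iommu_ops my_iommu_ops = {
 *              .domain_init = my_domain_init,
 *              .attach_dev  = my_attach_dev,
 *              .map         = my_map,
 *              .unmap       = my_unmap,
 *              .add_device  = my_add_device,
 *      };
 *
 *      if (!iommu_present(&pci_bus_type))
 *              bus_set_iommu(&pci_bus_type, &my_iommu_ops);
 */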

bool iommu_present(struct bus_type *bus)
{
        return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);

/**
 * iommu_set_fault_handler() - set a fault handler for an iommu domain
 * @domain: iommu domain
 * @handler: fault handler
 * @token: user data, will be passed back to the fault handler
 *
 * This function should be used by IOMMU users which want to be notified
 * whenever an IOMMU fault happens.
 *
 * The fault handler itself should return 0 on success, and an appropriate
 * error code otherwise.
 */
void iommu_set_fault_handler(struct iommu_domain *domain,
                                        iommu_fault_handler_t handler,
                                        void *token)
{
        BUG_ON(!domain);

        domain->handler = handler;
        domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
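
/*
 * Example (illustrative sketch, not part of the kernel source): a user of
 * the IOMMU API can log faults and, by returning an error, indicate that
 * the fault was not handled.  my_fault_handler() is a placeholder; the
 * signature follows iommu_fault_handler_t.
 *
 *      static int my_fault_handler(struct iommu_domain *domain,
 *                                  struct device *dev, unsigned long iova,
 *                                  int flags, void *token)
 *      {
 *              dev_err(dev, "iommu fault at iova 0x%lx\n", iova);
 *              return -ENOSYS;
 *      }
 *
 *      iommu_set_fault_handler(domain, my_fault_handler, NULL);
 */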

struct iommu_domain *iommu_domain_alloc(struct bus_type *bus)
{
        struct iommu_domain *domain;
        int ret;

        if (bus == NULL || bus->iommu_ops == NULL)
                return NULL;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (!domain)
                return NULL;

        domain->ops = bus->iommu_ops;

        ret = domain->ops->domain_init(domain);
        if (ret)
                goto out_free;

        return domain;

out_free:
        kfree(domain);

        return NULL;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

void iommu_domain_free(struct iommu_domain *domain)
{
        if (likely(domain->ops->domain_destroy != NULL))
                domain->ops->domain_destroy(domain);

        kfree(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);

int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->attach_dev == NULL))
                return -ENODEV;

        return domain->ops->attach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_attach_device);

void iommu_detach_device(struct iommu_domain *domain, struct device *dev)
{
        if (unlikely(domain->ops->detach_dev == NULL))
                return;

        domain->ops->detach_dev(domain, dev);
}
EXPORT_SYMBOL_GPL(iommu_detach_device);
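
/*
 * Example (illustrative sketch, not part of the kernel source): the usual
 * domain life cycle for a single device.  my_use_domain() is a
 * placeholder.
 *
 *      static int my_use_domain(struct device *dev)
 *      {
 *              struct iommu_domain *domain = iommu_domain_alloc(dev->bus);
 *              int ret;
 *
 *              if (!domain)
 *                      return -ENODEV;
 *
 *              ret = iommu_attach_device(domain, dev);
 *              if (ret) {
 *                      iommu_domain_free(domain);
 *                      return ret;
 *              }
 *
 *              ... use iommu_map() and iommu_unmap() on the domain ...
 *
 *              iommu_detach_device(domain, dev);
 *              iommu_domain_free(domain);
 *              return 0;
 *      }
 */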

/*
 * IOMMU groups are really the natural working unit of the IOMMU, but
 * the IOMMU API works on domains and devices.  Bridge that gap by
 * iterating over the devices in a group.  Ideally we'd have a single
 * device which represents the requestor ID of the group, but we also
 * allow IOMMU drivers to create policy defined minimum sets, where
 * the physical hardware may be able to distinguish members, but we
 * wish to group them at a higher level (ex. untrusted multi-function
 * PCI devices).  Thus we attach each device.
 */
static int iommu_group_do_attach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        return iommu_attach_device(domain, dev);
}

int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        return iommu_group_for_each_dev(group, domain,
                                        iommu_group_do_attach_device);
}
EXPORT_SYMBOL_GPL(iommu_attach_group);

static int iommu_group_do_detach_device(struct device *dev, void *data)
{
        struct iommu_domain *domain = data;

        iommu_detach_device(domain, dev);

        return 0;
}

void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
{
        iommu_group_for_each_dev(group, domain, iommu_group_do_detach_device);
}
EXPORT_SYMBOL_GPL(iommu_detach_group);

phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
        if (unlikely(domain->ops->iova_to_phys == NULL))
                return 0;

        return domain->ops->iova_to_phys(domain, iova);
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);

int iommu_domain_has_cap(struct iommu_domain *domain,
                         unsigned long cap)
{
        if (unlikely(domain->ops->domain_has_cap == NULL))
                return 0;

        return domain->ops->domain_has_cap(domain, cap);
}
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);

static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
        unsigned int pgsize_idx;
        size_t pgsize;

        /* Max page size that still fits into 'size' */
        pgsize_idx = __fls(size);

        /* need to consider alignment requirements ? */
        if (likely(addr_merge)) {
                /* Max page size allowed by address */
                unsigned int align_pgsize_idx = __ffs(addr_merge);
                pgsize_idx = min(pgsize_idx, align_pgsize_idx);
        }

        /* build a mask of acceptable page sizes */
        pgsize = (1UL << (pgsize_idx + 1)) - 1;

        /* throw away page sizes not supported by the hardware */
        pgsize &= domain->ops->pgsize_bitmap;

        /* make sure we're still sane */
        BUG_ON(!pgsize);

        /* pick the biggest page */
        pgsize_idx = __fls(pgsize);
        pgsize = 1UL << pgsize_idx;

        return pgsize;
}
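
/*
 * Worked example (added for this listing): with a pgsize_bitmap of
 * 4K | 2M (0x00001000 | 0x00200000), mapping size 0x400000 at
 * iova = paddr = 0x200000 gives __fls(0x400000) = 22 as the cap by size
 * and __ffs(0x200000) = 21 as the cap by alignment, so pgsize_idx = 21.
 * The mask (1UL << 22) - 1 = 0x3fffff ANDed with the bitmap leaves 4K
 * and 2M, and __fls() picks 2M - the first iteration of the iommu_map()
 * loop below therefore maps a single 2M page.
 */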

int iommu_map(struct iommu_domain *domain, unsigned long iova,
              phys_addr_t paddr, size_t size, int prot)
{
        unsigned long orig_iova = iova;
        unsigned int min_pagesz;
        size_t orig_size = size;
        int ret = 0;

        if (unlikely(domain->ops->map == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * both the virtual address and the physical one, as well as
         * the size of the mapping, must be aligned (at least) to the
         * size of the smallest page supported by the hardware
         */
        if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
                       iova, &paddr, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

        while (size) {
                size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

                pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
                         iova, &paddr, pgsize);

                ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
                if (ret)
                        break;

                iova += pgsize;
                paddr += pgsize;
                size -= pgsize;
        }

        /* unroll mapping in case something went wrong */
        if (ret)
                iommu_unmap(domain, orig_iova, orig_size - size);

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
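
/*
 * Example (illustrative sketch, not part of the kernel source): mapping a
 * 64K buffer for DMA and tearing it down again.  With the 4K | 2M bitmap
 * from the worked example above and 4K-aligned iova/paddr, the loop in
 * iommu_map() issues sixteen 4K map calls.
 *
 *      ret = iommu_map(domain, iova, paddr, SZ_64K,
 *                      IOMMU_READ | IOMMU_WRITE);
 *      if (ret)
 *              return ret;
 *
 *      if (iommu_unmap(domain, iova, SZ_64K) != SZ_64K)
 *              dev_warn(dev, "partial unmap\n");
 */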

size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
        size_t unmapped_page, unmapped = 0;
        unsigned int min_pagesz;

        if (unlikely(domain->ops->unmap == NULL ||
                     domain->ops->pgsize_bitmap == 0UL))
                return -ENODEV;

        /* find out the minimum page size supported */
        min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

        /*
         * The virtual address, as well as the size of the mapping, must be
         * aligned (at least) to the size of the smallest page supported
         * by the hardware
         */
        if (!IS_ALIGNED(iova | size, min_pagesz)) {
                pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
                       iova, size, min_pagesz);
                return -EINVAL;
        }

        pr_debug("unmap this: iova 0x%lx size 0x%zx\n", iova, size);

        /*
         * Keep iterating until we either unmap 'size' bytes (or more)
         * or we hit an area that isn't mapped.
         */
        while (unmapped < size) {
                size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);

                unmapped_page = domain->ops->unmap(domain, iova, pgsize);
                if (!unmapped_page)
                        break;

                pr_debug("unmapped: iova 0x%lx size 0x%zx\n",
                         iova, unmapped_page);

                iova += unmapped_page;
                unmapped += unmapped_page;
        }

        return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);

int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
                               phys_addr_t paddr, u64 size, int prot)
{
        if (unlikely(domain->ops->domain_window_enable == NULL))
                return -ENODEV;

        return domain->ops->domain_window_enable(domain, wnd_nr, paddr, size,
                                                 prot);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_enable);

void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
{
        if (unlikely(domain->ops->domain_window_disable == NULL))
                return;

        domain->ops->domain_window_disable(domain, wnd_nr);
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);

static int __init iommu_init(void)
{
        iommu_group_kset = kset_create_and_add("iommu_groups",
                                               NULL, kernel_kobj);
        ida_init(&iommu_group_ida);
        mutex_init(&iommu_group_mutex);

        BUG_ON(!iommu_group_kset);

        return 0;
}
arch_initcall(iommu_init);

int iommu_domain_get_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        struct iommu_domain_geometry *geometry;
        bool *paging;
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_GEOMETRY:
                geometry  = data;
                *geometry = domain->geometry;

                break;
        case DOMAIN_ATTR_PAGING:
                paging  = data;
                *paging = (domain->ops->pgsize_bitmap != 0UL);
                break;
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_get_windows != NULL)
                        *count = domain->ops->domain_get_windows(domain);
                else
                        ret = -ENODEV;

                break;
        default:
                if (!domain->ops->domain_get_attr)
                        return -EINVAL;

                ret = domain->ops->domain_get_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_get_attr);
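
/*
 * Example (illustrative sketch, not part of the kernel source): querying
 * the IOVA aperture of a domain via the geometry attribute.
 *
 *      struct iommu_domain_geometry geo;
 *
 *      if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
 *              pr_info("aperture: 0x%llx-0x%llx\n",
 *                      (u64)geo.aperture_start, (u64)geo.aperture_end);
 */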

int iommu_domain_set_attr(struct iommu_domain *domain,
                          enum iommu_attr attr, void *data)
{
        int ret = 0;
        u32 *count;

        switch (attr) {
        case DOMAIN_ATTR_WINDOWS:
                count = data;

                if (domain->ops->domain_set_windows != NULL)
                        ret = domain->ops->domain_set_windows(domain, *count);
                else
                        ret = -ENODEV;

                break;
        default:
                if (domain->ops->domain_set_attr == NULL)
                        return -EINVAL;

                ret = domain->ops->domain_set_attr(domain, attr, data);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);