linux/drivers/acpi/arm64/iort.c
/*
 * Copyright (C) 2016, Semihalf
 *      Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * This file implements early detection/parsing of I/O mapping
 * reported to OS through firmware via I/O Remapping Table (IORT)
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)     "ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)    (1 << (type))
#define IORT_MSI_TYPE           (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE         ((1 << ACPI_IORT_NODE_SMMU) |   \
                                (1 << ACPI_IORT_NODE_SMMU_V3))

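/*
 * Illustrative example: these masks let a node's type be tested with
 * plain bit arithmetic, e.g.
 *
 *      IORT_TYPE_MASK(ACPI_IORT_NODE_SMMU) & IORT_IOMMU_TYPE      -> non-zero
 *      IORT_TYPE_MASK(ACPI_IORT_NODE_ITS_GROUP) & IORT_IOMMU_TYPE -> 0
 *
 * iort_node_map_id() below relies on exactly this test to decide whether
 * a node reached during an ID walk is of the requested type.
 */
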
struct iort_its_msi_chip {
        struct list_head        list;
        struct fwnode_handle    *fw_node;
        phys_addr_t             base_addr;
        u32                     translation_id;
};

struct iort_fwnode {
        struct list_head list;
        struct acpi_iort_node *iort_node;
        struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *                     iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
                                  struct fwnode_handle *fwnode)
{
        struct iort_fwnode *np;

        np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

        if (WARN_ON(!np))
                return -ENOMEM;

        INIT_LIST_HEAD(&np->list);
        np->iort_node = iort_node;
        np->fwnode = fwnode;

        spin_lock(&iort_fwnode_lock);
        list_add_tail(&np->list, &iort_fwnode_list);
        spin_unlock(&iort_fwnode_lock);

        return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
                        struct acpi_iort_node *node)
{
        struct iort_fwnode *curr;
        struct fwnode_handle *fwnode = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        fwnode = curr->fwnode;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
        struct iort_fwnode *curr, *tmp;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        list_del(&curr->list);
                        kfree(curr);
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
                        struct fwnode_handle *fwnode)
{
        struct iort_fwnode *curr;
        struct acpi_iort_node *iort_node = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->fwnode == fwnode) {
                        iort_node = curr->iort_node;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
        (struct acpi_iort_node *node, void *context);

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - Register a domain token, along with the
 * related ITS ID and base address, on a list from which it can be
 * retrieved later.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
                               struct fwnode_handle *fw_node)
{
        struct iort_its_msi_chip *its_msi_chip;

        its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
        if (!its_msi_chip)
                return -ENOMEM;

        its_msi_chip->fw_node = fw_node;
        its_msi_chip->translation_id = trans_id;
        its_msi_chip->base_addr = base;

        spin_lock(&iort_msi_chip_lock);
        list_add(&its_msi_chip->list, &iort_msi_chip_list);
        spin_unlock(&iort_msi_chip_lock);

        return 0;
}

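/*
 * Usage sketch (illustrative, names are indicative only): the
 * registration above is expected to be done by the ITS irqchip driver
 * once it has created its MSI irqdomain, roughly:
 *
 *      dom_handle = irq_domain_alloc_fwnode(...);
 *      // create the DOMAIN_BUS_PCI_MSI irqdomain on dom_handle
 *      iort_register_domain_token(its_entry->translation_id, res.start,
 *                                 dom_handle);
 *
 * iort_find_domain_token() later maps the ITS ID found in the IORT
 * table back to this fwnode so the matching irqdomain can be looked up.
 */
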
/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
        struct iort_its_msi_chip *its_msi_chip, *t;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        list_del(&its_msi_chip->list);
                        kfree(its_msi_chip);
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token when found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
        struct fwnode_handle *fw_node = NULL;
        struct iort_its_msi_chip *its_msi_chip;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        fw_node = its_msi_chip->fw_node;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
                                             iort_find_node_callback callback,
                                             void *context)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        int i;

        if (!iort_table)
                return NULL;

        /* Get the first IORT node */
        iort = (struct acpi_table_iort *)iort_table;
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                               "IORT node pointer overflows, bad table!\n"))
                        return NULL;

                if (iort_node->type == type &&
                    ACPI_SUCCESS(callback(iort_node, context)))
                        return iort_node;

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }

        return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                                            void *context)
{
        struct device *dev = context;
        acpi_status status = AE_NOT_FOUND;

        if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
                struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
                struct acpi_iort_named_component *ncomp;

                if (!adev)
                        goto out;

                status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
                if (ACPI_FAILURE(status)) {
                        dev_warn(dev, "Can't get device full path name\n");
                        goto out;
                }

                ncomp = (struct acpi_iort_named_component *)node->node_data;
                status = !strcmp(ncomp->device_name, buf.pointer) ?
                                                        AE_OK : AE_NOT_FOUND;
                acpi_os_free(buf.pointer);
        } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_root_complex *pci_rc;
                struct pci_bus *bus;

                bus = to_pci_bus(dev);
                pci_rc = (struct acpi_iort_root_complex *)node->node_data;

                /*
                 * It is assumed that PCI segment numbers map one-to-one
                 * to root complexes. Each segment number can represent
                 * only one root complex.
                 */
                status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
                                                        AE_OK : AE_NOT_FOUND;
        }
out:
        return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out)
{
        /* Single mapping does not care for input id */
        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                        *rid_out = map->output_base;
                        return 0;
                }

                pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
                        map, type);
                return -ENXIO;
        }

        if (rid_in < map->input_base ||
            (rid_in >= map->input_base + map->id_count))
                return -ENXIO;

        *rid_out = map->output_base + (rid_in - map->input_base);
        return 0;
}

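/*
 * Worked example (illustrative): for a non-single mapping entry the
 * translation above is a plain linear remap. With a hypothetical entry
 * { input_base = 0x100, id_count = 0x100, output_base = 0x2000 }, an
 * input ID of 0x142 yields
 *
 *      rid_out = 0x2000 + (0x142 - 0x100) = 0x2042
 *
 * while any input outside [0x100, 0x1ff] makes iort_id_map() return
 * -ENXIO so the caller can try the next mapping entry.
 */
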
static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
                                               u32 *id_out, int index)
{
        struct acpi_iort_node *parent;
        struct acpi_iort_id_mapping *map;

        if (!node->mapping_offset || !node->mapping_count ||
                                     index >= node->mapping_count)
                return NULL;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference) {
                pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                       node, node->type);
                return NULL;
        }

        parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                               map->output_reference);

        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
                    node->type == ACPI_IORT_NODE_SMMU_V3) {
                        *id_out = map->output_base;
                        return parent;
                }
        }

        return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                /*
                 * The SMMUv3 dev ID mapping index was introduced in
                 * revision 1 of the table; it is not available in
                 * revision 0.
                 */
                if (node->revision < 1)
                        return -EINVAL;

                smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
                /*
                 * The ID mapping index is only ignored if all interrupts
                 * are GSIV based.
                 */
                if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
                    && smmu->sync_gsiv)
                        return -EINVAL;

                if (smmu->id_mapping_index >= node->mapping_count) {
                        pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
                               node, node->type);
                        return -EINVAL;
                }

                return smmu->id_mapping_index;
        default:
                return -EINVAL;
        }
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
                                               u32 id_in, u32 *id_out,
                                               u8 type_mask)
{
        u32 id = id_in;

        /* Parse the ID mapping tree to find specified node type */
        while (node) {
                struct acpi_iort_id_mapping *map;
                int i, index;

                if (IORT_TYPE_MASK(node->type) & type_mask) {
                        if (id_out)
                                *id_out = id;
                        return node;
                }

                if (!node->mapping_offset || !node->mapping_count)
                        goto fail_map;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                                   node->mapping_offset);

                /* Firmware bug! */
                if (!map->output_reference) {
                        pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                               node, node->type);
                        goto fail_map;
                }

                /*
                 * Get the special ID mapping index (if any) and skip its
                 * associated ID map to prevent erroneous multi-stage
                 * IORT ID translations.
                 */
                index = iort_get_id_mapping_index(node);

                /* Do the ID translation */
                for (i = 0; i < node->mapping_count; i++, map++) {
                        /* if it is special mapping index, skip it */
                        if (i == index)
                                continue;

                        if (!iort_id_map(map, node->type, id, &id))
                                break;
                }

                if (i == node->mapping_count)
                        goto fail_map;

                node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                    map->output_reference);
        }

fail_map:
        /* Map input ID to output ID unchanged on mapping failure */
        if (id_out)
                *id_out = id_in;

        return NULL;
}

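/*
 * Illustrative example: a typical walk for a PCI device behind an SMMU
 * follows two mapping hops,
 *
 *      root complex node --(RID -> StreamID)--> SMMU node
 *      SMMU node --(StreamID -> DeviceID)--> ITS group node
 *
 * so iort_node_map_id(rc_node, rid, &dev_id, IORT_MSI_TYPE) traverses
 * both hops, while the same call with IORT_IOMMU_TYPE stops at the SMMU
 * node and reports the StreamID.
 */
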
static struct acpi_iort_node *iort_node_map_platform_id(
                struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
                int index)
{
        struct acpi_iort_node *parent;
        u32 id;

        /* step 1: retrieve the initial dev id */
        parent = iort_node_get_id(node, &id, index);
        if (!parent)
                return NULL;

        /*
         * Optional step 2: if the initial dev id's parent is not the
         * target type we want, map the id again to cover use cases such
         * as NC (named component) -> SMMU -> ITS. If the type matches,
         * return the initial dev id and its parent pointer directly.
         */
        if (!(IORT_TYPE_MASK(parent->type) & type_mask))
                parent = iort_node_map_id(parent, id, id_out, type_mask);
        else
                if (id_out)
                        *id_out = id;

        return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
        struct pci_bus *pbus;

        if (!dev_is_pci(dev)) {
                struct acpi_iort_node *node;
                /*
                 * Scan iort_fwnode_list to see if the device is an IORT
                 * platform device (such as an SMMU or PMCG); its IORT
                 * node was already cached and associated with its fwnode
                 * when the IORT platform devices were initialized.
                 */
                node = iort_get_iort_node(dev->fwnode);
                if (node)
                        return node;

                /*
                 * If not, then it should be a platform device defined in
                 * DSDT/SSDT (with a Named Component node in IORT).
                 */
                return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
        }

        /* Find a PCI root bus */
        pbus = to_pci_dev(dev)->bus;
        while (!pci_is_root_bus(pbus))
                pbus = pbus->parent;

        return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map a MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
        struct acpi_iort_node *node;
        u32 dev_id;

        node = iort_find_dev_node(dev);
        if (!node)
                return req_id;

        iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
        return dev_id;
}

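/*
 * Usage sketch (illustrative): iort_msi_map_rid() is deliberately
 * conservative; when the table has no ITS mapping for the device the
 * requester ID passes through unchanged:
 *
 *      u32 dev_id = iort_msi_map_rid(&pdev->dev, pci_rid);
 *      // dev_id == pci_rid if no mapping exists
 *
 * This mirrors the fail_map behaviour of iort_node_map_id() above,
 * which copies id_in to *id_out on any mapping failure.
 */
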
/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a device ID was found, -ENODEV on error
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
        int i, index;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENODEV;

        index = iort_get_id_mapping_index(node);
        /* if there is a valid index, go get the dev_id directly */
        if (index >= 0) {
                if (iort_node_get_id(node, dev_id, index))
                        return 0;
        } else {
                for (i = 0; i < node->mapping_count; i++) {
                        if (iort_node_map_platform_id(node, dev_id,
                                                      IORT_MSI_TYPE, i))
                                return 0;
                }
        }

        return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
        struct iort_its_msi_chip *its_msi_chip;
        int ret = -ENODEV;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == its_id) {
                        *base = its_msi_chip->base_addr;
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
                                unsigned int idx, int *its_id)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENXIO;

        node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
        if (!node)
                return -ENXIO;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;
        /* idx == its_count would overrun the identifiers array, hence >= */
        if (idx >= its->its_count) {
                dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
                        idx, its->its_count);
                return -ENXIO;
        }

        *its_id = its->identifiers[idx];
        return 0;
}

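/*
 * Usage sketch (illustrative, names are indicative only): this lookup
 * chain is what ties a PCI device to its MSI controller. The PCI/MSI
 * core resolves a device's MSI domain roughly as
 *
 *      rid = pci_msi_domain_get_msi_rid(domain, pdev);
 *      dom = iort_get_device_domain(&pdev->dev, rid);
 *
 * where iort_get_device_domain(), below, resolves
 * device -> ITS group -> domain token -> irqdomain.
 */
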
/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
        struct fwnode_handle *handle;
        int its_id;

        if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
                return NULL;

        handle = iort_find_domain_token(its_id);
        if (!handle)
                return NULL;

        return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

static void iort_set_device_domain(struct device *dev,
                                   struct acpi_iort_node *node)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *msi_parent;
        struct acpi_iort_id_mapping *map;
        struct fwnode_handle *iort_fwnode;
        struct irq_domain *domain;
        int index;

        index = iort_get_id_mapping_index(node);
        if (index < 0)
                return;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference ||
            !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
                pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
                       node, node->type);
                return;
        }

        msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                  map->output_reference);

        if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
                return;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return;

        domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
        if (domain)
                dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
        struct acpi_iort_node *node, *msi_parent;
        struct fwnode_handle *iort_fwnode;
        struct acpi_iort_its_group *its;
        int i;

        /* find its associated iort node */
        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node)
                return NULL;

        /* then find its msi parent node */
        for (i = 0; i < node->mapping_count; i++) {
                msi_parent = iort_node_map_platform_id(node, NULL,
                                                       IORT_MSI_TYPE, i);
                if (msi_parent)
                        break;
        }

        if (!msi_parent)
                return NULL;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return NULL;

        return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
        struct irq_domain *msi_domain;

        msi_domain = iort_get_platform_device_domain(dev);
        if (msi_domain)
                dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
                                        void *data)
{
        u32 *rid = data;

        *rid = alias;
        return 0;
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
                               struct fwnode_handle *fwnode,
                               const struct iommu_ops *ops)
{
        int ret = iommu_fwspec_init(dev, fwnode, ops);

        if (!ret)
                ret = iommu_fwspec_add_ids(dev, &streamid, 1);

        return ret;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
                return IS_BUILTIN(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
        }
}

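/*
 * Illustrative note: the IS_BUILTIN() checks above feed the
 * -EPROBE_DEFER/-ENODEV decision in iort_iommu_xlate() below:
 *
 *      SMMU driver built in, not yet probed -> -EPROBE_DEFER (retry later)
 *      SMMU driver not built in             -> -ENODEV (abort, no IOMMU)
 *
 * Modular SMMU drivers are therefore treated as absent by this version
 * of the code.
 */
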
#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
        struct acpi_iort_node *iommu;
        struct iommu_fwspec *fwspec = dev->iommu_fwspec;

        iommu = iort_get_iort_node(fwspec->iommu_fwnode);

        if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
                struct acpi_iort_smmu_v3 *smmu;

                smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
                if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
                        return iommu;
        }

        return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(
                                struct iommu_fwspec *fwspec)
{
        return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
                                         struct device *dev)
{
        int err = 0;

        if (ops->add_device && dev->bus && !dev->iommu_group)
                err = ops->add_device(dev);

        return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if platform
 *          doesn't require the reservation or no associated msi regions),
 *          appropriate error value otherwise. The ITS interrupt translation
 *          spaces (ITS_base + SZ_64K, SZ_64K) associated with the device
 *          are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *iommu_node, *its_node = NULL;
        int i, resv = 0;

        iommu_node = iort_get_msi_resv_iommu(dev);
        if (!iommu_node)
                return 0;

        /*
         * Current logic to reserve ITS regions relies on HW topologies
         * where a given PCI or named component maps its IDs to only one
         * ITS group; if a PCI or named component can map its IDs to
         * different ITS groups through IORT mappings this function has
         * to be reworked to ensure we reserve regions for all ITS groups
         * a given PCI or named component may map IDs to.
         */

        for (i = 0; i < dev->iommu_fwspec->num_ids; i++) {
                its_node = iort_node_map_id(iommu_node,
                                        dev->iommu_fwspec->ids[i],
                                        NULL, IORT_MSI_TYPE);
                if (its_node)
                        break;
        }

        if (!its_node)
                return 0;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)its_node->node_data;

        for (i = 0; i < its->its_count; i++) {
                phys_addr_t base;

                if (!iort_find_its_base(its->identifiers[i], &base)) {
                        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
                        struct iommu_resv_region *region;

                        region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
                                                         prot, IOMMU_RESV_MSI);
                        if (region) {
                                list_add_tail(&region->list, head);
                                resv++;
                        }
                }
        }

        return (resv == its->its_count) ? resv : -ENODEV;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(
                                struct iommu_fwspec *fwspec)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
                                         struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
#endif

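/*
 * Illustrative note: the (base + SZ_64K, SZ_64K) window reserved above
 * covers the second 64K page of the ITS register frame, which holds
 * GITS_TRANSLATER, the doorbell register that devices write to raise an
 * MSI. Marking it IOMMU_RESV_MSI keeps the IOVA allocator from handing
 * out that range while still allowing MSI writes to reach the ITS on
 * the HiSilicon Hi161x SMMUv3 model targeted here.
 */
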
static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
                            u32 streamid)
{
        const struct iommu_ops *ops;
        struct fwnode_handle *iort_fwnode;

        if (!node)
                return -ENODEV;

        iort_fwnode = iort_get_fwnode(node);
        if (!iort_fwnode)
                return -ENODEV;

        /*
         * If the ops look-up fails, this means that either
         * the SMMU drivers have not been probed yet or that
         * the SMMU drivers are not built into the kernel;
         * defer the IOMMU configuration if the drivers are
         * built-in, abort it otherwise.
         */
        ops = iommu_ops_from_fwnode(iort_fwnode);
        if (!ops)
                return iort_iommu_driver_enabled(node->type) ?
                       -EPROBE_DEFER : -ENODEV;

        return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
        struct device *dev;
        struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
        struct iort_pci_alias_info *info = data;
        struct acpi_iort_node *parent;
        u32 streamid;

        parent = iort_node_map_id(info->node, alias, &streamid,
                                  IORT_IOMMU_TYPE);
        return iort_iommu_xlate(info->dev, parent, streamid);
}

static int nc_dma_get_range(struct device *dev, u64 *size)
{
        struct acpi_iort_node *node;
        struct acpi_iort_named_component *ncomp;

        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node)
                return -ENODEV;

        ncomp = (struct acpi_iort_named_component *)node->node_data;

        *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
                        1ULL << ncomp->memory_address_limit;

        return 0;
}

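/*
 * Worked example (illustrative): memory_address_limit is the number of
 * address bits the named component can drive, so a limit of 32 gives
 *
 *      *size = 1ULL << 32 = 0x100000000 (a 4 GiB window)
 *
 * and a limit of 64 or more saturates to U64_MAX, since shifting a
 * 64-bit value by 64 is undefined behaviour in C.
 */
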
/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
        u64 mask, dmaaddr = 0, size = 0, offset = 0;
        int ret, msb;

        /*
         * Set default coherent_dma_mask to 32 bit.  Drivers are expected to
         * setup the correct supported mask.
         */
        if (!dev->coherent_dma_mask)
                dev->coherent_dma_mask = DMA_BIT_MASK(32);

        /*
         * Set it to coherent_dma_mask by default if the architecture
         * code has not set it.
         */
        if (!dev->dma_mask)
                dev->dma_mask = &dev->coherent_dma_mask;

        size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);

        if (dev_is_pci(dev))
                ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
        else
                ret = nc_dma_get_range(dev, &size);

        if (!ret) {
                msb = fls64(dmaaddr + size - 1);
                /*
                 * Round-up to the power-of-two mask or set
                 * the mask to the whole 64-bit address space
                 * in case the DMA region covers the full
                 * memory window.
                 */
                mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
                /*
                 * Limit coherent and dma mask based on size
                 * retrieved from firmware.
                 */
                dev->coherent_dma_mask = mask;
                *dev->dma_mask = mask;
        }

        *dma_addr = dmaaddr;
        *dma_size = size;

        dev->dma_pfn_offset = PFN_DOWN(offset);
        dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

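/*
 * Worked example (illustrative): with a hypothetical firmware-provided
 * range of dmaaddr = 0 and size = SZ_1G, the code above computes
 *
 *      msb  = fls64(0x3fffffff) = 30
 *      mask = (1ULL << 30) - 1  = 0x3fffffff
 *
 * clamping both DMA masks to 30 bits; only when the region covers the
 * whole 64-bit space (msb == 64) does the mask become U64_MAX.
 */
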
/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
        struct acpi_iort_node *node, *parent;
        const struct iommu_ops *ops;
        u32 streamid = 0;
        int err = -ENODEV;

        /*
         * If we already translated the fwspec there
         * is nothing left to do, return the iommu_ops.
         */
        ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
        if (ops)
                return ops;

        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                struct iort_pci_alias_info info = { .dev = dev };

                node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                                      iort_match_node_callback, &bus->dev);
                if (!node)
                        return NULL;

                info.node = node;
                err = pci_for_each_dma_alias(to_pci_dev(dev),
                                             iort_pci_iommu_init, &info);
        } else {
                int i = 0;

                node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
                if (!node)
                        return NULL;

                do {
                        parent = iort_node_map_platform_id(node, &streamid,
                                                           IORT_IOMMU_TYPE,
                                                           i++);

                        if (parent)
                                err = iort_iommu_xlate(dev, parent, streamid);
                } while (parent && !err);
        }

        /*
         * If we have reason to believe the IOMMU driver missed the initial
         * add_device callback for dev, replay it to get things in order.
         */
        if (!err) {
                ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
                err = iort_add_device_replay(ops, dev);
        }

        /* Ignore all other errors apart from EPROBE_DEFER */
        if (err == -EPROBE_DEFER) {
                ops = ERR_PTR(err);
        } else if (err) {
                dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
                ops = NULL;
        }

        return ops;
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
                                          int trigger,
                                          struct resource *res)
{
        int irq = acpi_register_gsi(NULL, hwirq, trigger,
                                    ACPI_ACTIVE_HIGH);

        if (irq <= 0) {
                pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
                                                                      name);
                return;
        }

        res->start = irq;
        res->end = irq;
        res->flags = IORESOURCE_IRQ;
        res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        /* Always present mem resource */
        int num_res = 1;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        if (smmu->event_gsiv)
                num_res++;

        if (smmu->pri_gsiv)
                num_res++;

        if (smmu->gerr_gsiv)
                num_res++;

        if (smmu->sync_gsiv)
                num_res++;

        return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * The Cavium ThunderX2 implementation doesn't support unique irq
         * lines. Use a single irq line for all the SMMUv3 interrupts.
         */
        if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return false;

        /*
         * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
         * SPI numbers here.
         */
        return smmu->event_gsiv == smmu->pri_gsiv &&
               smmu->event_gsiv == smmu->gerr_gsiv &&
               smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * Override the size for the Cavium ThunderX2 implementation,
         * which doesn't support the page 1 SMMU register space.
         */
        if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return SZ_64K;

        return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
                                              struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        int num_res = 0;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address +
                                arm_smmu_v3_resource_size(smmu) - 1;
        res[num_res].flags = IORESOURCE_MEM;

        num_res++;
        if (arm_smmu_v3_is_combined_irq(smmu)) {
                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "combined",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        } else {

                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "eventq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->pri_gsiv)
                        acpi_iort_register_irq(smmu->pri_gsiv, "priq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->gerr_gsiv)
                        acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->sync_gsiv)
                        acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        }
}

static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static void __init arm_smmu_v3_set_proximity(struct device *dev,
                                             struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
                set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
                pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
                        smmu->base_address,
                        smmu->pxm);
        }
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        /*
         * Only consider the global fault interrupt and ignore the
         * configuration access interrupt.
         *
         * MMIO address and global fault interrupt resources are always
         * present so add them to the context interrupt count as a static
         * value.
         */
        return smmu->context_interrupt_count + 2;
}

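/*
 * Worked example (illustrative): the "+ 2" above accounts for the MMIO
 * window and the global fault interrupt, which are always present. A
 * hypothetical node with three context interrupts thus yields
 * 3 + 2 = 5 resources: one IORESOURCE_MEM entry plus four
 * IORESOURCE_IRQ entries, filled in by arm_smmu_init_resources() below
 * in exactly that order.
 */
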
static void __init arm_smmu_init_resources(struct resource *res,
                                           struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        int i, hw_irq, trigger, num_res = 0;
        u64 *ctx_irq, *glb_irq;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address + smmu->span - 1;
        res[num_res].flags = IORESOURCE_MEM;
        num_res++;

        glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
        /* Global IRQs */
        hw_irq = IORT_IRQ_MASK(glb_irq[0]);
        trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

        acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
                                     &res[num_res++]);

        /* Context IRQs */
        ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
        for (i = 0; i < smmu->context_interrupt_count; i++) {
                hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
                trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

                acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
                                       &res[num_res++]);
        }
}

static bool __init arm_smmu_is_coherent(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        return smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK;
}

struct iort_dev_config {
        const char *name;
        int (*dev_init)(struct acpi_iort_node *node);
        bool (*dev_is_coherent)(struct acpi_iort_node *node);
        int (*dev_count_resources)(struct acpi_iort_node *node);
        void (*dev_init_resources)(struct resource *res,
                                     struct acpi_iort_node *node);
        void (*dev_set_proximity)(struct device *dev,
                                    struct acpi_iort_node *node);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
        .name = "arm-smmu-v3",
        .dev_is_coherent = arm_smmu_v3_is_coherent,
        .dev_count_resources = arm_smmu_v3_count_resources,
        .dev_init_resources = arm_smmu_v3_init_resources,
        .dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
        .name = "arm-smmu",
        .dev_is_coherent = arm_smmu_is_coherent,
        .dev_count_resources = arm_smmu_count_resources,
        .dev_init_resources = arm_smmu_init_resources
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
                        struct acpi_iort_node *node)
{
        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return &iort_arm_smmu_v3_cfg;
        case ACPI_IORT_NODE_SMMU:
                return &iort_arm_smmu_cfg;
        default:
                return NULL;
        }
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
                                           const struct iort_dev_config *ops)
{
        struct fwnode_handle *fwnode;
        struct platform_device *pdev;
        struct resource *r;
        enum dev_dma_attr attr;
        int ret, count;

        pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;

        if (ops->dev_set_proximity)
                ops->dev_set_proximity(&pdev->dev, node);

        count = ops->dev_count_resources(node);

        r = kcalloc(count, sizeof(*r), GFP_KERNEL);
        if (!r) {
                ret = -ENOMEM;
                goto dev_put;
        }

        ops->dev_init_resources(r, node);

        ret = platform_device_add_resources(pdev, r, count);
        /*
         * Resources are duplicated in platform_device_add_resources,
         * free their allocated memory
         */
        kfree(r);

        if (ret)
                goto dev_put;

        /*
         * Add a copy of IORT node pointer to platform_data to
         * be used to retrieve IORT data information.
         */
        ret = platform_device_add_data(pdev, &node, sizeof(node));
        if (ret)
                goto dev_put;

        /*
         * We expect the dma masks to be equivalent for
         * all SMMU set-ups.
         */
        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

        fwnode = iort_get_fwnode(node);

        if (!fwnode) {
                ret = -ENODEV;
                goto dev_put;
        }

        pdev->dev.fwnode = fwnode;

        attr = ops->dev_is_coherent && ops->dev_is_coherent(node) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(&pdev->dev, attr);

        iort_set_device_domain(&pdev->dev, node);

        ret = platform_device_add(pdev);
        if (ret)
                goto dma_deconfigure;

        return 0;

dma_deconfigure:
        acpi_dma_deconfigure(&pdev->dev);
dev_put:
        platform_device_put(pdev);

        return ret;
}

static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
        if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_node *parent;
                struct acpi_iort_id_mapping *map;
                int i;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
                                   iort_node->mapping_offset);

                for (i = 0; i < iort_node->mapping_count; i++, map++) {
                        if (!map->output_reference)
                                continue;

                        parent = ACPI_ADD_PTR(struct acpi_iort_node,
                                        iort_table, map->output_reference);
                        /*
                         * If we detect a RC->SMMU mapping, make sure
                         * we enable ACS on the system.
                         */
                        if ((parent->type == ACPI_IORT_NODE_SMMU) ||
                                (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
                                pci_request_acs();
                                return true;
                        }
                }
        }

        return false;
}

static void __init iort_init_platform_devices(void)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
        bool acs_enabled = false;
        const struct iort_dev_config *ops;

        /*
         * iort_table and iort both point to the start of IORT table, but
         * have different struct types
         */
        iort = (struct acpi_table_iort *)iort_table;

        /* Get the first IORT node */
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (iort_node >= iort_end) {
                        pr_err("iort node pointer overflows, bad table\n");
                        return;
                }

                if (!acs_enabled)
                        acs_enabled = iort_enable_acs(iort_node);

                ops = iort_get_dev_cfg(iort_node);
                if (ops) {
                        fwnode = acpi_alloc_fwnode_static();
                        if (!fwnode)
                                return;

                        iort_set_fwnode(iort_node, fwnode);

                        ret = iort_add_platform_device(iort_node, ops);
                        if (ret) {
                                iort_delete_fwnode(iort_node);
                                acpi_free_fwnode_static(fwnode);
                                return;
                        }
                }

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }
}

void __init acpi_iort_init(void)
{
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        const char *msg = acpi_format_exception(status);

                        pr_err("Failed to get table, %s\n", msg);
                }

                return;
        }

        iort_init_platform_devices();
}