linux/drivers/acpi/arm64/iort.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016, Semihalf
 *      Author: Tomasz Nowicki <tn@semihalf.com>
 *
 * This file implements early detection/parsing of the I/O mappings
 * reported to the OS by firmware via the I/O Remapping Table (IORT).
 * IORT document number: ARM DEN 0049A
 */

#define pr_fmt(fmt)     "ACPI: IORT: " fmt

#include <linux/acpi_iort.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define IORT_TYPE_MASK(type)    (1 << (type))
#define IORT_MSI_TYPE           (1 << ACPI_IORT_NODE_ITS_GROUP)
#define IORT_IOMMU_TYPE         ((1 << ACPI_IORT_NODE_SMMU) |   \
                                (1 << ACPI_IORT_NODE_SMMU_V3))
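
/*
 * Note: these masks let a single u8 type_mask select several IORT node
 * types at once during a mapping walk, e.g. IORT_IOMMU_TYPE matches
 * both SMMU (v1/v2) and SMMUv3 nodes, while IORT_MSI_TYPE matches ITS
 * group nodes only.
 */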

struct iort_its_msi_chip {
        struct list_head        list;
        struct fwnode_handle    *fw_node;
        phys_addr_t             base_addr;
        u32                     translation_id;
};

struct iort_fwnode {
        struct list_head list;
        struct acpi_iort_node *iort_node;
        struct fwnode_handle *fwnode;
};
static LIST_HEAD(iort_fwnode_list);
static DEFINE_SPINLOCK(iort_fwnode_lock);

/**
 * iort_set_fwnode() - Create iort_fwnode and use it to register
 *                     iommu data in the iort_fwnode_list
 *
 * @iort_node: IORT table node associated with the IOMMU
 * @fwnode: fwnode associated with the IORT node
 *
 * Returns: 0 on success
 *          <0 on failure
 */
static inline int iort_set_fwnode(struct acpi_iort_node *iort_node,
                                  struct fwnode_handle *fwnode)
{
        struct iort_fwnode *np;

        np = kzalloc(sizeof(struct iort_fwnode), GFP_ATOMIC);

        if (WARN_ON(!np))
                return -ENOMEM;

        INIT_LIST_HEAD(&np->list);
        np->iort_node = iort_node;
        np->fwnode = fwnode;

        spin_lock(&iort_fwnode_lock);
        list_add_tail(&np->list, &iort_fwnode_list);
        spin_unlock(&iort_fwnode_lock);

        return 0;
}

/**
 * iort_get_fwnode() - Retrieve fwnode associated with an IORT node
 *
 * @node: IORT table node to be looked-up
 *
 * Returns: fwnode_handle pointer on success, NULL on failure
 */
static inline struct fwnode_handle *iort_get_fwnode(
                        struct acpi_iort_node *node)
{
        struct iort_fwnode *curr;
        struct fwnode_handle *fwnode = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        fwnode = curr->fwnode;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return fwnode;
}

/**
 * iort_delete_fwnode() - Delete fwnode associated with an IORT node
 *
 * @node: IORT table node associated with fwnode to delete
 */
static inline void iort_delete_fwnode(struct acpi_iort_node *node)
{
        struct iort_fwnode *curr, *tmp;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry_safe(curr, tmp, &iort_fwnode_list, list) {
                if (curr->iort_node == node) {
                        list_del(&curr->list);
                        kfree(curr);
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);
}

/**
 * iort_get_iort_node() - Retrieve iort_node associated with an fwnode
 *
 * @fwnode: fwnode associated with device to be looked-up
 *
 * Returns: iort_node pointer on success, NULL on failure
 */
static inline struct acpi_iort_node *iort_get_iort_node(
                        struct fwnode_handle *fwnode)
{
        struct iort_fwnode *curr;
        struct acpi_iort_node *iort_node = NULL;

        spin_lock(&iort_fwnode_lock);
        list_for_each_entry(curr, &iort_fwnode_list, list) {
                if (curr->fwnode == fwnode) {
                        iort_node = curr->iort_node;
                        break;
                }
        }
        spin_unlock(&iort_fwnode_lock);

        return iort_node;
}

typedef acpi_status (*iort_find_node_callback)
        (struct acpi_iort_node *node, void *context);
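
/*
 * A scan callback returns an acpi_status: AE_OK makes iort_scan_node()
 * stop and return the node being visited, while any failure status
 * (e.g. AE_NOT_FOUND) moves the scan on to the next node.
 */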

/* Root pointer to the mapped IORT table */
static struct acpi_table_header *iort_table;

static LIST_HEAD(iort_msi_chip_list);
static DEFINE_SPINLOCK(iort_msi_chip_lock);

/**
 * iort_register_domain_token() - Register a domain token along with the
 * related ITS ID and base address to the list from which we can get it
 * back later on.
 * @trans_id: ITS ID.
 * @base: ITS base address.
 * @fw_node: Domain token.
 *
 * Returns: 0 on success, -ENOMEM if there is no memory when allocating
 *          the list element
 */
int iort_register_domain_token(int trans_id, phys_addr_t base,
                               struct fwnode_handle *fw_node)
{
        struct iort_its_msi_chip *its_msi_chip;

        its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
        if (!its_msi_chip)
                return -ENOMEM;

        its_msi_chip->fw_node = fw_node;
        its_msi_chip->translation_id = trans_id;
        its_msi_chip->base_addr = base;

        spin_lock(&iort_msi_chip_lock);
        list_add(&its_msi_chip->list, &iort_msi_chip_list);
        spin_unlock(&iort_msi_chip_lock);

        return 0;
}

/**
 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: none.
 */
void iort_deregister_domain_token(int trans_id)
{
        struct iort_its_msi_chip *its_msi_chip, *t;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        list_del(&its_msi_chip->list);
                        kfree(its_msi_chip);
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);
}

/**
 * iort_find_domain_token() - Find domain token based on given ITS ID
 * @trans_id: ITS ID.
 *
 * Returns: domain token if found on the list, NULL otherwise
 */
struct fwnode_handle *iort_find_domain_token(int trans_id)
{
        struct fwnode_handle *fw_node = NULL;
        struct iort_its_msi_chip *its_msi_chip;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == trans_id) {
                        fw_node = its_msi_chip->fw_node;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return fw_node;
}

static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
                                             iort_find_node_callback callback,
                                             void *context)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        int i;

        if (!iort_table)
                return NULL;

        /* Get the first IORT node */
        iort = (struct acpi_table_iort *)iort_table;
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
                               "IORT node pointer overflows, bad table!\n"))
                        return NULL;

                if (iort_node->type == type &&
                    ACPI_SUCCESS(callback(iort_node, context)))
                        return iort_node;

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }

        return NULL;
}

static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
                                            void *context)
{
        struct device *dev = context;
        acpi_status status = AE_NOT_FOUND;

        if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
                struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
                struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
                struct acpi_iort_named_component *ncomp;

                if (!adev)
                        goto out;

                status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
                if (ACPI_FAILURE(status)) {
                        dev_warn(dev, "Can't get device full path name\n");
                        goto out;
                }

                ncomp = (struct acpi_iort_named_component *)node->node_data;
                status = !strcmp(ncomp->device_name, buf.pointer) ?
                                                        AE_OK : AE_NOT_FOUND;
                acpi_os_free(buf.pointer);
        } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_root_complex *pci_rc;
                struct pci_bus *bus;

                bus = to_pci_bus(dev);
                pci_rc = (struct acpi_iort_root_complex *)node->node_data;

                /*
                 * It is assumed that PCI segment numbers map one-to-one
                 * with root complexes. Each segment number can represent
                 * only one root complex.
                 */
                status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
                                                        AE_OK : AE_NOT_FOUND;
        }
out:
        return status;
}

static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
                       u32 *rid_out)
{
        /* A single mapping does not care about the input id */
        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                        *rid_out = map->output_base;
                        return 0;
                }

                pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
                        map, type);
                return -ENXIO;
        }

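        /*
         * Range mapping (illustrative): an input id in
         * [input_base, input_base + id_count) is translated to
         * output_base plus the same offset, e.g. a map with
         * input_base 0x0 and output_base 0x10000 turns RID 0x42
         * into 0x10042.
         */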
        if (rid_in < map->input_base ||
            (rid_in >= map->input_base + map->id_count))
                return -ENXIO;

        *rid_out = map->output_base + (rid_in - map->input_base);
        return 0;
}

static struct acpi_iort_node *iort_node_get_id(struct acpi_iort_node *node,
                                               u32 *id_out, int index)
{
        struct acpi_iort_node *parent;
        struct acpi_iort_id_mapping *map;

        if (!node->mapping_offset || !node->mapping_count ||
                                     index >= node->mapping_count)
                return NULL;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference) {
                pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                       node, node->type);
                return NULL;
        }

        parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                               map->output_reference);

        if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
                if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT ||
                    node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX ||
                    node->type == ACPI_IORT_NODE_SMMU_V3 ||
                    node->type == ACPI_IORT_NODE_PMCG) {
                        *id_out = map->output_base;
                        return parent;
                }
        }

        return NULL;
}

static int iort_get_id_mapping_index(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                /*
                 * The SMMUv3 dev ID mapping index was introduced in
                 * revision 1 of the table; it is not available in
                 * revision 0.
                 */
                if (node->revision < 1)
                        return -EINVAL;

                smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
                /*
                 * ID mapping index is only ignored if all interrupts are
                 * GSIV based
                 */
                if (smmu->event_gsiv && smmu->pri_gsiv && smmu->gerr_gsiv
                    && smmu->sync_gsiv)
                        return -EINVAL;

                if (smmu->id_mapping_index >= node->mapping_count) {
                        pr_err(FW_BUG "[node %p type %d] ID mapping index overflows valid mappings\n",
                               node, node->type);
                        return -EINVAL;
                }

                return smmu->id_mapping_index;
        case ACPI_IORT_NODE_PMCG:
                return 0;
        default:
                return -EINVAL;
        }
}

static struct acpi_iort_node *iort_node_map_id(struct acpi_iort_node *node,
                                               u32 id_in, u32 *id_out,
                                               u8 type_mask)
{
        u32 id = id_in;

        /* Parse the ID mapping tree to find specified node type */
        while (node) {
                struct acpi_iort_id_mapping *map;
                int i, index;

                if (IORT_TYPE_MASK(node->type) & type_mask) {
                        if (id_out)
                                *id_out = id;
                        return node;
                }

                if (!node->mapping_offset || !node->mapping_count)
                        goto fail_map;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                                   node->mapping_offset);

                /* Firmware bug! */
                if (!map->output_reference) {
                        pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
                               node, node->type);
                        goto fail_map;
                }

                /*
                 * Get the special ID mapping index (if any) and skip its
                 * associated ID map to prevent erroneous multi-stage
                 * IORT ID translations.
                 */
                index = iort_get_id_mapping_index(node);

                /* Do the ID translation */
                for (i = 0; i < node->mapping_count; i++, map++) {
                        /* if it is the special mapping index, skip it */
                        if (i == index)
                                continue;

                        if (!iort_id_map(map, node->type, id, &id))
                                break;
                }

                if (i == node->mapping_count)
                        goto fail_map;

                node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                    map->output_reference);
        }

fail_map:
        /* Map input ID to output ID unchanged on mapping failure */
        if (id_out)
                *id_out = id_in;

        return NULL;
}

static struct acpi_iort_node *iort_node_map_platform_id(
                struct acpi_iort_node *node, u32 *id_out, u8 type_mask,
                int index)
{
        struct acpi_iort_node *parent;
        u32 id;

        /* step 1: retrieve the initial dev id */
        parent = iort_node_get_id(node, &id, index);
        if (!parent)
                return NULL;

        /*
         * optional step 2: if the parent is not the target type we want,
         * map the initial dev id again, for use cases such as
         * NC (named component) -> SMMU -> ITS. If the type matches,
         * return the initial dev id and its parent pointer directly.
         */
        if (!(IORT_TYPE_MASK(parent->type) & type_mask))
                parent = iort_node_map_id(parent, id, id_out, type_mask);
        else
                if (id_out)
                        *id_out = id;

        return parent;
}

static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
{
        struct pci_bus *pbus;

        if (!dev_is_pci(dev)) {
                struct acpi_iort_node *node;
                /*
                 * Scan iort_fwnode_list to see if it's an iort platform
                 * device (such as an SMMU or PMCG); its iort node was
                 * already cached and associated with its fwnode when the
                 * iort platform devices were initialized.
                 */
                node = iort_get_iort_node(dev->fwnode);
                if (node)
                        return node;

                /*
                 * If not, then it should be a platform device defined in
                 * DSDT/SSDT (with a Named Component node in the IORT)
                 */
                return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
        }

        /* Find a PCI root bus */
        pbus = to_pci_dev(dev)->bus;
        while (!pci_is_root_bus(pbus))
                pbus = pbus->parent;

        return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
}

/**
 * iort_msi_map_rid() - Map an MSI requester ID for a device
 * @dev: The device for which the mapping is to be done.
 * @req_id: The device requester ID.
 *
 * Returns: mapped MSI RID on success, input requester ID otherwise
 */
u32 iort_msi_map_rid(struct device *dev, u32 req_id)
{
        struct acpi_iort_node *node;
        u32 dev_id;

        node = iort_find_dev_node(dev);
        if (!node)
                return req_id;

        iort_node_map_id(node, req_id, &dev_id, IORT_MSI_TYPE);
        return dev_id;
}

/**
 * iort_pmsi_get_dev_id() - Get the device id for a device
 * @dev: The device for which the mapping is to be done.
 * @dev_id: The device ID found.
 *
 * Returns: 0 if a dev id was found, -ENODEV otherwise
 */
int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id)
{
        int i, index;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENODEV;

        index = iort_get_id_mapping_index(node);
        /* if there is a valid index, go get the dev_id directly */
        if (index >= 0) {
                if (iort_node_get_id(node, dev_id, index))
                        return 0;
        } else {
                for (i = 0; i < node->mapping_count; i++) {
                        if (iort_node_map_platform_id(node, dev_id,
                                                      IORT_MSI_TYPE, i))
                                return 0;
                }
        }

        return -ENODEV;
}

static int __maybe_unused iort_find_its_base(u32 its_id, phys_addr_t *base)
{
        struct iort_its_msi_chip *its_msi_chip;
        int ret = -ENODEV;

        spin_lock(&iort_msi_chip_lock);
        list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
                if (its_msi_chip->translation_id == its_id) {
                        *base = its_msi_chip->base_addr;
                        ret = 0;
                        break;
                }
        }
        spin_unlock(&iort_msi_chip_lock);

        return ret;
}

/**
 * iort_dev_find_its_id() - Find the ITS identifier for a device
 * @dev: The device.
 * @req_id: Device's requester ID
 * @idx: Index of the ITS identifier list.
 * @its_id: ITS identifier.
 *
 * Returns: 0 on success, appropriate error value otherwise
 */
static int iort_dev_find_its_id(struct device *dev, u32 req_id,
                                unsigned int idx, int *its_id)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *node;

        node = iort_find_dev_node(dev);
        if (!node)
                return -ENXIO;

        node = iort_node_map_id(node, req_id, NULL, IORT_MSI_TYPE);
        if (!node)
                return -ENXIO;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)node->node_data;
        if (idx >= its->its_count) {
                dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
                        idx, its->its_count);
                return -ENXIO;
        }

        *its_id = its->identifiers[idx];
        return 0;
}

/**
 * iort_get_device_domain() - Find MSI domain related to a device
 * @dev: The device.
 * @req_id: Requester ID for the device.
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
{
        struct fwnode_handle *handle;
        int its_id;

        if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
                return NULL;

        handle = iort_find_domain_token(its_id);
        if (!handle)
                return NULL;

        return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
}

static void iort_set_device_domain(struct device *dev,
                                   struct acpi_iort_node *node)
{
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *msi_parent;
        struct acpi_iort_id_mapping *map;
        struct fwnode_handle *iort_fwnode;
        struct irq_domain *domain;
        int index;

        index = iort_get_id_mapping_index(node);
        if (index < 0)
                return;

        map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
                           node->mapping_offset + index * sizeof(*map));

        /* Firmware bug! */
        if (!map->output_reference ||
            !(map->flags & ACPI_IORT_ID_SINGLE_MAPPING)) {
                pr_err(FW_BUG "[node %p type %d] Invalid MSI mapping\n",
                       node, node->type);
                return;
        }

        msi_parent = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
                                  map->output_reference);

        if (!msi_parent || msi_parent->type != ACPI_IORT_NODE_ITS_GROUP)
                return;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return;

        domain = irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
        if (domain)
                dev_set_msi_domain(dev, domain);
}

/**
 * iort_get_platform_device_domain() - Find MSI domain related to a
 * platform device
 * @dev: the dev pointer associated with the platform device
 *
 * Returns: the MSI domain for this device, NULL otherwise
 */
static struct irq_domain *iort_get_platform_device_domain(struct device *dev)
{
        struct acpi_iort_node *node, *msi_parent = NULL;
        struct fwnode_handle *iort_fwnode;
        struct acpi_iort_its_group *its;
        int i;

        /* find its associated iort node */
        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node)
                return NULL;

        /* then find its msi parent node */
        for (i = 0; i < node->mapping_count; i++) {
                msi_parent = iort_node_map_platform_id(node, NULL,
                                                       IORT_MSI_TYPE, i);
                if (msi_parent)
                        break;
        }

        if (!msi_parent)
                return NULL;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)msi_parent->node_data;

        iort_fwnode = iort_find_domain_token(its->identifiers[0]);
        if (!iort_fwnode)
                return NULL;

        return irq_find_matching_fwnode(iort_fwnode, DOMAIN_BUS_PLATFORM_MSI);
}

void acpi_configure_pmsi_domain(struct device *dev)
{
        struct irq_domain *msi_domain;

        msi_domain = iort_get_platform_device_domain(dev);
        if (msi_domain)
                dev_set_msi_domain(dev, msi_domain);
}

static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
                                        void *data)
{
        u32 *rid = data;

        *rid = alias;
        return 0;
}

#ifdef CONFIG_IOMMU_API
static struct acpi_iort_node *iort_get_msi_resv_iommu(struct device *dev)
{
        struct acpi_iort_node *iommu;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        iommu = iort_get_iort_node(fwspec->iommu_fwnode);

        if (iommu && (iommu->type == ACPI_IORT_NODE_SMMU_V3)) {
                struct acpi_iort_smmu_v3 *smmu;

                smmu = (struct acpi_iort_smmu_v3 *)iommu->node_data;
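                /*
                 * On the HiSilicon Hi161x SMMUv3, MSI doorbell writes
                 * cannot bypass the SMMU, so the ITS interrupt
                 * translation spaces must be exposed as reserved
                 * regions to the IOMMU layer.
                 */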
                if (smmu->model == ACPI_IORT_SMMU_V3_HISILICON_HI161X)
                        return iommu;
        }

        return NULL;
}

static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

        return (fwspec && fwspec->ops) ? fwspec->ops : NULL;
}

static inline int iort_add_device_replay(const struct iommu_ops *ops,
                                         struct device *dev)
{
        int err = 0;

        if (dev->bus && !device_iommu_mapped(dev))
                err = iommu_probe_device(dev);

        return err;
}

/**
 * iort_iommu_msi_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @head: Reserved region list from iommu_get_resv_regions()
 *
 * Returns: Number of msi reserved regions on success (0 if the platform
 *          doesn't require the reservation or there are no associated
 *          msi regions), appropriate error value otherwise. The ITS
 *          interrupt translation spaces (ITS_base + SZ_64K, SZ_64K)
 *          associated with the device are the msi reserved regions.
 */
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct acpi_iort_its_group *its;
        struct acpi_iort_node *iommu_node, *its_node = NULL;
        int i, resv = 0;

        iommu_node = iort_get_msi_resv_iommu(dev);
        if (!iommu_node)
                return 0;

        /*
         * Current logic to reserve ITS regions relies on HW topologies
         * where a given PCI or named component maps its IDs to only one
         * ITS group; if a PCI or named component can map its IDs to
         * different ITS groups through IORT mappings this function has
         * to be reworked to ensure we reserve regions for all ITS groups
         * a given PCI or named component may map IDs to.
         */

        for (i = 0; i < fwspec->num_ids; i++) {
                its_node = iort_node_map_id(iommu_node,
                                        fwspec->ids[i],
                                        NULL, IORT_MSI_TYPE);
                if (its_node)
                        break;
        }

        if (!its_node)
                return 0;

        /* Move to ITS specific data */
        its = (struct acpi_iort_its_group *)its_node->node_data;

        for (i = 0; i < its->its_count; i++) {
                phys_addr_t base;

                if (!iort_find_its_base(its->identifiers[i], &base)) {
                        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
                        struct iommu_resv_region *region;

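                        /*
                         * Reserve the 64K page at ITS_base + 64K: it
                         * contains GITS_TRANSLATER, the MSI doorbell
                         * register that devices actually write to.
                         */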
                        region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
                                                         prot, IOMMU_RESV_MSI);
                        if (region) {
                                list_add_tail(&region->list, head);
                                resv++;
                        }
                }
        }

        return (resv == its->its_count) ? resv : -ENODEV;
}

static inline bool iort_iommu_driver_enabled(u8 type)
{
        switch (type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return IS_BUILTIN(CONFIG_ARM_SMMU_V3);
        case ACPI_IORT_NODE_SMMU:
                return IS_BUILTIN(CONFIG_ARM_SMMU);
        default:
                pr_warn("IORT node type %u does not describe an SMMU\n", type);
                return false;
        }
}

static int arm_smmu_iort_xlate(struct device *dev, u32 streamid,
                               struct fwnode_handle *fwnode,
                               const struct iommu_ops *ops)
{
        int ret = iommu_fwspec_init(dev, fwnode, ops);

        if (!ret)
                ret = iommu_fwspec_add_ids(dev, &streamid, 1);

        return ret;
}

static bool iort_pci_rc_supports_ats(struct acpi_iort_node *node)
{
        struct acpi_iort_root_complex *pci_rc;

        pci_rc = (struct acpi_iort_root_complex *)node->node_data;
        return pci_rc->ats_attribute & ACPI_IORT_ATS_SUPPORTED;
}

static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
                            u32 streamid)
{
        const struct iommu_ops *ops;
        struct fwnode_handle *iort_fwnode;

        if (!node)
                return -ENODEV;

        iort_fwnode = iort_get_fwnode(node);
        if (!iort_fwnode)
                return -ENODEV;

        /*
         * If the ops look-up fails, it means that either the SMMU
         * drivers have not been probed yet or that the SMMU drivers
         * are not built into the kernel. Depending on whether the
         * SMMU drivers are built-in, defer the IOMMU configuration
         * or just abort it.
         */
        ops = iommu_ops_from_fwnode(iort_fwnode);
        if (!ops)
                return iort_iommu_driver_enabled(node->type) ?
                       -EPROBE_DEFER : -ENODEV;

        return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
}

struct iort_pci_alias_info {
        struct device *dev;
        struct acpi_iort_node *node;
};

static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
{
        struct iort_pci_alias_info *info = data;
        struct acpi_iort_node *parent;
        u32 streamid;

        parent = iort_node_map_id(info->node, alias, &streamid,
                                  IORT_IOMMU_TYPE);
        return iort_iommu_xlate(info->dev, parent, streamid);
}

/**
 * iort_iommu_configure - Set-up IOMMU configuration for a device.
 *
 * @dev: device to configure
 *
 * Returns: iommu_ops pointer on configuration success
 *          NULL on configuration failure
 */
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
        struct acpi_iort_node *node, *parent;
        const struct iommu_ops *ops;
        u32 streamid = 0;
        int err = -ENODEV;

        /*
         * If we already translated the fwspec there
         * is nothing left to do; return the iommu_ops.
         */
        ops = iort_fwspec_iommu_ops(dev);
        if (ops)
                return ops;

        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;
                struct iort_pci_alias_info info = { .dev = dev };

                node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                                      iort_match_node_callback, &bus->dev);
                if (!node)
                        return NULL;

                info.node = node;
                err = pci_for_each_dma_alias(to_pci_dev(dev),
                                             iort_pci_iommu_init, &info);

                if (!err && iort_pci_rc_supports_ats(node))
                        dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
        } else {
                int i = 0;

                node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                                      iort_match_node_callback, dev);
                if (!node)
                        return NULL;

                do {
                        parent = iort_node_map_platform_id(node, &streamid,
                                                           IORT_IOMMU_TYPE,
                                                           i++);

                        if (parent)
                                err = iort_iommu_xlate(dev, parent, streamid);
                } while (parent && !err);
        }

        /*
         * If we have reason to believe the IOMMU driver missed the initial
         * add_device callback for dev, replay it to get things in order.
         */
        if (!err) {
                ops = iort_fwspec_iommu_ops(dev);
                err = iort_add_device_replay(ops, dev);
        }

        /* Ignore all other errors apart from EPROBE_DEFER */
        if (err == -EPROBE_DEFER) {
                ops = ERR_PTR(err);
        } else if (err) {
                dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
                ops = NULL;
        }

        return ops;
}
#else
static inline const struct iommu_ops *iort_fwspec_iommu_ops(struct device *dev)
{ return NULL; }
static inline int iort_add_device_replay(const struct iommu_ops *ops,
                                         struct device *dev)
{ return 0; }
int iort_iommu_msi_get_resv_regions(struct device *dev, struct list_head *head)
{ return 0; }
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif

static int nc_dma_get_range(struct device *dev, u64 *size)
{
        struct acpi_iort_node *node;
        struct acpi_iort_named_component *ncomp;

        node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
                              iort_match_node_callback, dev);
        if (!node)
                return -ENODEV;

        ncomp = (struct acpi_iort_named_component *)node->node_data;

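        /*
         * memory_address_limit is the number of addressable bits; e.g.
         * a limit of 32 yields a 4GB (1ULL << 32) DMA window
         * (illustrative), while 64 or more means the full 64-bit space.
         */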
        *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
                        1ULL << ncomp->memory_address_limit;

        return 0;
}

static int rc_dma_get_range(struct device *dev, u64 *size)
{
        struct acpi_iort_node *node;
        struct acpi_iort_root_complex *rc;
        struct pci_bus *pbus = to_pci_dev(dev)->bus;

        node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
                              iort_match_node_callback, &pbus->dev);
        if (!node || node->revision < 1)
                return -ENODEV;

        rc = (struct acpi_iort_root_complex *)node->node_data;

        *size = rc->memory_address_limit >= 64 ? U64_MAX :
                        1ULL << rc->memory_address_limit;

        return 0;
}

/**
 * iort_dma_setup() - Set-up device DMA parameters.
 *
 * @dev: device to configure
 * @dma_addr: device DMA address result pointer
 * @dma_size: DMA range size result pointer
 */
void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
        u64 mask, dmaaddr = 0, size = 0, offset = 0;
        int ret, msb;

        /*
         * If @dev is expected to be DMA-capable then the bus code that created
         * it should have initialised its dma_mask pointer by this point. For
         * now, we'll continue the legacy behaviour of coercing it to the
         * coherent mask if not, but we'll no longer do so quietly.
         */
        if (!dev->dma_mask) {
                dev_warn(dev, "DMA mask not set\n");
                dev->dma_mask = &dev->coherent_dma_mask;
        }

        if (dev->coherent_dma_mask)
                size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
        else
                size = 1ULL << 32;

        if (dev_is_pci(dev)) {
                ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
                if (ret == -ENODEV)
                        ret = rc_dma_get_range(dev, &size);
        } else {
                ret = nc_dma_get_range(dev, &size);
        }

        if (!ret) {
                msb = fls64(dmaaddr + size - 1);
                /*
                 * Round-up to the power-of-two mask or set
                 * the mask to the whole 64-bit address space
                 * in case the DMA region covers the full
                 * memory window.
                 */
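                /*
                 * For example (illustrative): dmaaddr = 0 and
                 * size = SZ_1G give msb = 30 and mask = 0x3fffffff.
                 */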
                mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
                /*
                 * Limit coherent and dma mask based on size
                 * retrieved from firmware.
                 */
                dev->bus_dma_mask = mask;
                dev->coherent_dma_mask = mask;
                *dev->dma_mask = mask;
        }

        *dma_addr = dmaaddr;
        *dma_size = size;

        dev->dma_pfn_offset = PFN_DOWN(offset);
        dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}

static void __init acpi_iort_register_irq(int hwirq, const char *name,
                                          int trigger,
                                          struct resource *res)
{
        int irq = acpi_register_gsi(NULL, hwirq, trigger,
                                    ACPI_ACTIVE_HIGH);

        if (irq <= 0) {
                pr_err("could not register gsi hwirq %d name [%s]\n", hwirq,
                                                                      name);
                return;
        }

        res->start = irq;
        res->end = irq;
        res->flags = IORESOURCE_IRQ;
        res->name = name;
}

static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        /* Always present mem resource */
        int num_res = 1;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        if (smmu->event_gsiv)
                num_res++;

        if (smmu->pri_gsiv)
                num_res++;

        if (smmu->gerr_gsiv)
                num_res++;

        if (smmu->sync_gsiv)
                num_res++;

        return num_res;
}

static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * The Cavium ThunderX2 implementation doesn't support unique irq
         * lines. Use a single irq line for all the SMMUv3 interrupts.
         */
        if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return false;

        /*
         * ThunderX2 doesn't support MSIs from the SMMU, so we're checking
         * SPI numbers here.
         */
        return smmu->event_gsiv == smmu->pri_gsiv &&
               smmu->event_gsiv == smmu->gerr_gsiv &&
               smmu->event_gsiv == smmu->sync_gsiv;
}

static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu)
{
        /*
         * Override the size for the Cavium ThunderX2 implementation,
         * which doesn't support the page 1 SMMU register space.
         */
        if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX)
                return SZ_64K;

        return SZ_128K;
}

static void __init arm_smmu_v3_init_resources(struct resource *res,
                                              struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        int num_res = 0;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address +
                                arm_smmu_v3_resource_size(smmu) - 1;
        res[num_res].flags = IORESOURCE_MEM;

        num_res++;
        if (arm_smmu_v3_is_combined_irq(smmu)) {
                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "combined",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        } else {
                if (smmu->event_gsiv)
                        acpi_iort_register_irq(smmu->event_gsiv, "eventq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->pri_gsiv)
                        acpi_iort_register_irq(smmu->pri_gsiv, "priq",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->gerr_gsiv)
                        acpi_iort_register_irq(smmu->gerr_gsiv, "gerror",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);

                if (smmu->sync_gsiv)
                        acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync",
                                               ACPI_EDGE_SENSITIVE,
                                               &res[num_res++]);
        }
}

static void __init arm_smmu_v3_dma_configure(struct device *dev,
                                             struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;
        enum dev_dma_attr attr;

        /* Retrieve SMMUv3 specific data */
        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

        attr = (smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* We expect the dma masks to be equivalent for all SMMUv3 set-ups */
        dev->dma_mask = &dev->coherent_dma_mask;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(dev, attr);
}

#if defined(CONFIG_ACPI_NUMA)
/*
 * set numa proximity domain for smmuv3 device
 */
static int __init arm_smmu_v3_set_proximity(struct device *dev,
                                            struct acpi_iort_node *node)
{
        struct acpi_iort_smmu_v3 *smmu;

        smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
        if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
                int dev_node = acpi_map_pxm_to_node(smmu->pxm);

                if (dev_node != NUMA_NO_NODE && !node_online(dev_node))
                        return -EINVAL;

                set_dev_node(dev, dev_node);
                pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
                        smmu->base_address,
                        smmu->pxm);
        }
        return 0;
}
#else
#define arm_smmu_v3_set_proximity NULL
#endif

static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        /*
         * Only consider the global fault interrupt and ignore the
         * configuration access interrupt.
         *
         * MMIO address and global fault interrupt resources are always
         * present so add them to the context interrupt count as a static
         * value.
         */
        return smmu->context_interrupt_count + 2;
}

static void __init arm_smmu_init_resources(struct resource *res,
                                           struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        int i, hw_irq, trigger, num_res = 0;
        u64 *ctx_irq, *glb_irq;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        res[num_res].start = smmu->base_address;
        res[num_res].end = smmu->base_address + smmu->span - 1;
        res[num_res].flags = IORESOURCE_MEM;
        num_res++;

        glb_irq = ACPI_ADD_PTR(u64, node, smmu->global_interrupt_offset);
        /* Global IRQs */
        hw_irq = IORT_IRQ_MASK(glb_irq[0]);
        trigger = IORT_IRQ_TRIGGER_MASK(glb_irq[0]);

        acpi_iort_register_irq(hw_irq, "arm-smmu-global", trigger,
                                     &res[num_res++]);

        /* Context IRQs */
        ctx_irq = ACPI_ADD_PTR(u64, node, smmu->context_interrupt_offset);
        for (i = 0; i < smmu->context_interrupt_count; i++) {
                hw_irq = IORT_IRQ_MASK(ctx_irq[i]);
                trigger = IORT_IRQ_TRIGGER_MASK(ctx_irq[i]);

                acpi_iort_register_irq(hw_irq, "arm-smmu-context", trigger,
                                       &res[num_res++]);
        }
}

static void __init arm_smmu_dma_configure(struct device *dev,
                                          struct acpi_iort_node *node)
{
        struct acpi_iort_smmu *smmu;
        enum dev_dma_attr attr;

        /* Retrieve SMMU specific data */
        smmu = (struct acpi_iort_smmu *)node->node_data;

        attr = (smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) ?
                        DEV_DMA_COHERENT : DEV_DMA_NON_COHERENT;

        /* We expect the dma masks to be equivalent for SMMU set-ups */
        dev->dma_mask = &dev->coherent_dma_mask;

        /* Configure DMA for the page table walker */
        acpi_dma_configure(dev, attr);
}

static int __init arm_smmu_v3_pmcg_count_resources(struct acpi_iort_node *node)
{
        struct acpi_iort_pmcg *pmcg;

        /* Retrieve PMCG specific data */
        pmcg = (struct acpi_iort_pmcg *)node->node_data;

        /*
         * There are always 2 memory resources.
         * If the overflow_gsiv is present then add that for a total of 3.
         */
        return pmcg->overflow_gsiv ? 3 : 2;
}

static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
                                                   struct acpi_iort_node *node)
{
        struct acpi_iort_pmcg *pmcg;

        /* Retrieve PMCG specific data */
        pmcg = (struct acpi_iort_pmcg *)node->node_data;

        res[0].start = pmcg->page0_base_address;
        res[0].end = pmcg->page0_base_address + SZ_4K - 1;
        res[0].flags = IORESOURCE_MEM;
        res[1].start = pmcg->page1_base_address;
        res[1].end = pmcg->page1_base_address + SZ_4K - 1;
        res[1].flags = IORESOURCE_MEM;

        if (pmcg->overflow_gsiv)
                acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
                                       ACPI_EDGE_SENSITIVE, &res[2]);
}

static struct acpi_platform_list pmcg_plat_info[] __initdata = {
        /* HiSilicon Hip08 Platform */
        {"HISI  ", "HIP08   ", 0, ACPI_SIG_IORT, greater_than_or_equal,
         "Erratum #162001800", IORT_SMMU_V3_PMCG_HISI_HIP08},
        { }
};

static int __init arm_smmu_v3_pmcg_add_platdata(struct platform_device *pdev)
{
        u32 model;
        int idx;

        idx = acpi_match_platform_list(pmcg_plat_info);
        if (idx >= 0)
                model = pmcg_plat_info[idx].data;
        else
                model = IORT_SMMU_V3_PMCG_GENERIC;

        return platform_device_add_data(pdev, &model, sizeof(model));
}

struct iort_dev_config {
        const char *name;
        int (*dev_init)(struct acpi_iort_node *node);
        void (*dev_dma_configure)(struct device *dev,
                                  struct acpi_iort_node *node);
        int (*dev_count_resources)(struct acpi_iort_node *node);
        void (*dev_init_resources)(struct resource *res,
                                     struct acpi_iort_node *node);
        int (*dev_set_proximity)(struct device *dev,
                                    struct acpi_iort_node *node);
        int (*dev_add_platdata)(struct platform_device *pdev);
};

static const struct iort_dev_config iort_arm_smmu_v3_cfg __initconst = {
        .name = "arm-smmu-v3",
        .dev_dma_configure = arm_smmu_v3_dma_configure,
        .dev_count_resources = arm_smmu_v3_count_resources,
        .dev_init_resources = arm_smmu_v3_init_resources,
        .dev_set_proximity = arm_smmu_v3_set_proximity,
};

static const struct iort_dev_config iort_arm_smmu_cfg __initconst = {
        .name = "arm-smmu",
        .dev_dma_configure = arm_smmu_dma_configure,
        .dev_count_resources = arm_smmu_count_resources,
        .dev_init_resources = arm_smmu_init_resources,
};

static const struct iort_dev_config iort_arm_smmu_v3_pmcg_cfg __initconst = {
        .name = "arm-smmu-v3-pmcg",
        .dev_count_resources = arm_smmu_v3_pmcg_count_resources,
        .dev_init_resources = arm_smmu_v3_pmcg_init_resources,
        .dev_add_platdata = arm_smmu_v3_pmcg_add_platdata,
};

static __init const struct iort_dev_config *iort_get_dev_cfg(
                        struct acpi_iort_node *node)
{
        switch (node->type) {
        case ACPI_IORT_NODE_SMMU_V3:
                return &iort_arm_smmu_v3_cfg;
        case ACPI_IORT_NODE_SMMU:
                return &iort_arm_smmu_cfg;
        case ACPI_IORT_NODE_PMCG:
                return &iort_arm_smmu_v3_pmcg_cfg;
        default:
                return NULL;
        }
}

/**
 * iort_add_platform_device() - Allocate a platform device for IORT node
 * @node: Pointer to device ACPI IORT node
 * @ops: Pointer to IORT device config struct
 *
 * Returns: 0 on success, <0 failure
 */
static int __init iort_add_platform_device(struct acpi_iort_node *node,
                                           const struct iort_dev_config *ops)
{
        struct fwnode_handle *fwnode;
        struct platform_device *pdev;
        struct resource *r;
        int ret, count;

        pdev = platform_device_alloc(ops->name, PLATFORM_DEVID_AUTO);
        if (!pdev)
                return -ENOMEM;

        if (ops->dev_set_proximity) {
                ret = ops->dev_set_proximity(&pdev->dev, node);
                if (ret)
                        goto dev_put;
        }

        count = ops->dev_count_resources(node);

        r = kcalloc(count, sizeof(*r), GFP_KERNEL);
        if (!r) {
                ret = -ENOMEM;
                goto dev_put;
        }

        ops->dev_init_resources(r, node);

        ret = platform_device_add_resources(pdev, r, count);
        /*
         * Resources are duplicated in platform_device_add_resources,
         * free their allocated memory
         */
        kfree(r);

        if (ret)
                goto dev_put;

        /*
         * Platform devices based on PMCG nodes use platform_data to
         * pass the hardware model info to the driver. For others, add
         * a copy of the IORT node pointer to platform_data, to be used
         * to retrieve IORT data information.
         */
        if (ops->dev_add_platdata)
                ret = ops->dev_add_platdata(pdev);
        else
                ret = platform_device_add_data(pdev, &node, sizeof(node));

        if (ret)
                goto dev_put;

        fwnode = iort_get_fwnode(node);

        if (!fwnode) {
                ret = -ENODEV;
                goto dev_put;
        }

        pdev->dev.fwnode = fwnode;

        if (ops->dev_dma_configure)
                ops->dev_dma_configure(&pdev->dev, node);

        iort_set_device_domain(&pdev->dev, node);

        ret = platform_device_add(pdev);
        if (ret)
                goto dma_deconfigure;

        return 0;

dma_deconfigure:
        arch_teardown_dma_ops(&pdev->dev);
dev_put:
        platform_device_put(pdev);

        return ret;
}

#ifdef CONFIG_PCI
static void __init iort_enable_acs(struct acpi_iort_node *iort_node)
{
        static bool acs_enabled __initdata;

        if (acs_enabled)
                return;

        if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
                struct acpi_iort_node *parent;
                struct acpi_iort_id_mapping *map;
                int i;

                map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
                                   iort_node->mapping_offset);

                for (i = 0; i < iort_node->mapping_count; i++, map++) {
                        if (!map->output_reference)
                                continue;

                        parent = ACPI_ADD_PTR(struct acpi_iort_node,
                                        iort_table,  map->output_reference);
                        /*
                         * If we detect an RC->SMMU mapping, make sure
                         * we enable ACS on the system.
                         */
                        if ((parent->type == ACPI_IORT_NODE_SMMU) ||
                                (parent->type == ACPI_IORT_NODE_SMMU_V3)) {
                                pci_request_acs();
                                acs_enabled = true;
                                return;
                        }
                }
        }
}
#else
static inline void iort_enable_acs(struct acpi_iort_node *iort_node) { }
#endif

static void __init iort_init_platform_devices(void)
{
        struct acpi_iort_node *iort_node, *iort_end;
        struct acpi_table_iort *iort;
        struct fwnode_handle *fwnode;
        int i, ret;
        const struct iort_dev_config *ops;

        /*
         * iort_table and iort both point to the start of the IORT table,
         * but have different struct types
         */
        iort = (struct acpi_table_iort *)iort_table;

        /* Get the first IORT node */
        iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                 iort->node_offset);
        iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort,
                                iort_table->length);

        for (i = 0; i < iort->node_count; i++) {
                if (iort_node >= iort_end) {
                        pr_err("iort node pointer overflows, bad table\n");
                        return;
                }

                iort_enable_acs(iort_node);

                ops = iort_get_dev_cfg(iort_node);
                if (ops) {
                        fwnode = acpi_alloc_fwnode_static();
                        if (!fwnode)
                                return;

                        iort_set_fwnode(iort_node, fwnode);

                        ret = iort_add_platform_device(iort_node, ops);
                        if (ret) {
                                iort_delete_fwnode(iort_node);
                                acpi_free_fwnode_static(fwnode);
                                return;
                        }
                }

                iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
                                         iort_node->length);
        }
}

void __init acpi_iort_init(void)
{
        acpi_status status;

        status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
        if (ACPI_FAILURE(status)) {
                if (status != AE_NOT_FOUND) {
                        const char *msg = acpi_format_exception(status);

                        pr_err("Failed to get table, %s\n", msg);
                }

                return;
        }

        iort_init_platform_devices();
}