linux/kernel/irq/irqdomain.c
   1// SPDX-License-Identifier: GPL-2.0
   2
   3#define pr_fmt(fmt)  "irq: " fmt
   4
   5#include <linux/acpi.h>
   6#include <linux/debugfs.h>
   7#include <linux/hardirq.h>
   8#include <linux/interrupt.h>
   9#include <linux/irq.h>
  10#include <linux/irqdesc.h>
  11#include <linux/irqdomain.h>
  12#include <linux/module.h>
  13#include <linux/mutex.h>
  14#include <linux/of.h>
  15#include <linux/of_address.h>
  16#include <linux/of_irq.h>
  17#include <linux/topology.h>
  18#include <linux/seq_file.h>
  19#include <linux/slab.h>
  20#include <linux/smp.h>
  21#include <linux/fs.h>
  22
  23static LIST_HEAD(irq_domain_list);
  24static DEFINE_MUTEX(irq_domain_mutex);
  25
  26static struct irq_domain *irq_default_domain;
  27
  28static void irq_domain_check_hierarchy(struct irq_domain *domain);
  29
  30struct irqchip_fwid {
  31        struct fwnode_handle    fwnode;
  32        unsigned int            type;
  33        char                    *name;
  34        void *data;
  35};
  36
  37#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  38static void debugfs_add_domain_dir(struct irq_domain *d);
  39static void debugfs_remove_domain_dir(struct irq_domain *d);
  40#else
  41static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
  42static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
  43#endif
  44
  45const struct fwnode_operations irqchip_fwnode_ops;
  46EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  47
  48/**
  49 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  50 *                           identifying an irq domain
  51 * @type:       Type of irqchip_fwnode. See linux/irqdomain.h
  52 * @name:       Optional user provided domain name
  53 * @id:         Optional user provided id if name != NULL
  54 * @data:       Optional user-provided data
  55 *
  56 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  57 * fwnode_handle (or NULL on failure).
  58 *
  59 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  60 * solely to transport name information to irqdomain creation code. The
  61 * node is not stored. For other types the pointer is kept in the irq
  62 * domain struct.
  63 */
  64struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  65                                                const char *name, void *data)
  66{
  67        struct irqchip_fwid *fwid;
  68        char *n;
  69
  70        fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  71
  72        switch (type) {
  73        case IRQCHIP_FWNODE_NAMED:
  74                n = kasprintf(GFP_KERNEL, "%s", name);
  75                break;
  76        case IRQCHIP_FWNODE_NAMED_ID:
  77                n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  78                break;
  79        default:
  80                n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  81                break;
  82        }
  83
  84        if (!fwid || !n) {
  85                kfree(fwid);
  86                kfree(n);
  87                return NULL;
  88        }
  89
  90        fwid->type = type;
  91        fwid->name = n;
  92        fwid->data = data;
  93        fwid->fwnode.ops = &irqchip_fwnode_ops;
  94        return &fwid->fwnode;
  95}
  96EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
  97
  98/**
  99 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
 100 * @fwnode: fwnode_handle to free
 101 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 102 */
 103void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
 104{
 105        struct irqchip_fwid *fwid;
 106
 107        if (WARN_ON(!is_fwnode_irqchip(fwnode)))
 108                return;
 109
 110        fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 111        kfree(fwid->name);
 112        kfree(fwid);
 113}
 114EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
 115
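/*
 * Usage sketch (illustrative, not part of this file): an irqchip that has no
 * firmware node of its own can allocate a named fwnode to identify its
 * domain, and must free it again if domain creation fails. The foo_* names
 * and foo_domain_ops are hypothetical.
 */
static struct irq_domain *foo_setup_domain(void)
{
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;

	/* IRQCHIP_FWNODE_NAMED only transports the name; the id is unused */
	fwnode = __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0,
					   "foo-intc", NULL);
	if (!fwnode)
		return NULL;

	domain = irq_domain_create_linear(fwnode, 32, &foo_domain_ops, NULL);
	if (!domain)
		irq_domain_free_fwnode(fwnode);

	return domain;
}
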
 116/**
 117 * __irq_domain_add() - Allocate a new irq_domain data structure
 118 * @fwnode: firmware node for the interrupt controller
 119 * @size: Size of linear map; 0 for radix mapping only
 120 * @hwirq_max: Maximum number of interrupts supported by controller
 121 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 122 *              direct mapping
 123 * @ops: domain callbacks
 124 * @host_data: Controller private data pointer
 125 *
 126 * Allocates and initializes an irq_domain structure.
 127 * Returns pointer to IRQ domain, or NULL on failure.
 128 */
 129struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 130                                    irq_hw_number_t hwirq_max, int direct_max,
 131                                    const struct irq_domain_ops *ops,
 132                                    void *host_data)
 133{
 134        struct device_node *of_node = to_of_node(fwnode);
 135        struct irqchip_fwid *fwid;
 136        struct irq_domain *domain;
 137
 138        static atomic_t unknown_domains;
 139
 140        domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
 141                              GFP_KERNEL, of_node_to_nid(of_node));
 142        if (WARN_ON(!domain))
 143                return NULL;
 144
 145        if (fwnode && is_fwnode_irqchip(fwnode)) {
 146                fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 147
 148                switch (fwid->type) {
 149                case IRQCHIP_FWNODE_NAMED:
 150                case IRQCHIP_FWNODE_NAMED_ID:
 151                        domain->name = kstrdup(fwid->name, GFP_KERNEL);
 152                        if (!domain->name) {
 153                                kfree(domain);
 154                                return NULL;
 155                        }
 156                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 157                        break;
 158                default:
 159                        domain->fwnode = fwnode;
 160                        domain->name = fwid->name;
 161                        break;
 162                }
 163#ifdef CONFIG_ACPI
 164        } else if (is_acpi_device_node(fwnode)) {
 165                struct acpi_buffer buf = {
 166                        .length = ACPI_ALLOCATE_BUFFER,
 167                };
 168                acpi_handle handle;
 169
 170                handle = acpi_device_handle(to_acpi_device_node(fwnode));
 171                if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
 172                        domain->name = buf.pointer;
 173                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 174                }
 175
 176                domain->fwnode = fwnode;
 177#endif
 178        } else if (of_node) {
 179                char *name;
 180
 181                /*
 182                 * DT paths contain '/', which debugfs is legitimately
 183                 * unhappy about. Replace them with ':', which does
 184                 * the trick and is not as offensive as '\'...
 185                 */
 186                name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
 187                if (!name) {
 188                        kfree(domain);
 189                        return NULL;
 190                }
 191
 192                strreplace(name, '/', ':');
 193
 194                domain->name = name;
 195                domain->fwnode = fwnode;
 196                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 197        }
 198
 199        if (!domain->name) {
 200                if (fwnode)
 201                        pr_err("Invalid fwnode type for irqdomain\n");
 202                domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
 203                                         atomic_inc_return(&unknown_domains));
 204                if (!domain->name) {
 205                        kfree(domain);
 206                        return NULL;
 207                }
 208                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 209        }
 210
 211        of_node_get(of_node);
 212
 213        /* Fill structure */
 214        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 215        mutex_init(&domain->revmap_tree_mutex);
 216        domain->ops = ops;
 217        domain->host_data = host_data;
 218        domain->hwirq_max = hwirq_max;
 219        domain->revmap_size = size;
 220        domain->revmap_direct_max_irq = direct_max;
 221        irq_domain_check_hierarchy(domain);
 222
 223        mutex_lock(&irq_domain_mutex);
 224        debugfs_add_domain_dir(domain);
 225        list_add(&domain->link, &irq_domain_list);
 226        mutex_unlock(&irq_domain_mutex);
 227
 228        pr_debug("Added domain %s\n", domain->name);
 229        return domain;
 230}
 231EXPORT_SYMBOL_GPL(__irq_domain_add);
 232
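/*
 * Usage sketch (illustrative, not part of this file): a typical driver
 * supplies ops with .map and .xlate callbacks and registers a linear revmap
 * domain, usually via the irq_domain_add_linear() wrapper from
 * <linux/irqdomain.h> rather than by calling __irq_domain_add() directly.
 * foo_chip and the domain size of 32 are assumptions for the example.
 */
static struct irq_chip foo_chip = {
	.name = "foo",
};

static int foo_domain_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	/* Per-virq setup: irq_chip, flow handler and chip data */
	irq_set_chip_and_handler(virq, &foo_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops foo_domain_ops = {
	.map	= foo_domain_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int foo_probe(struct device_node *np)
{
	struct irq_domain *domain;

	domain = irq_domain_add_linear(np, 32, &foo_domain_ops, NULL);
	return domain ? 0 : -ENOMEM;
}
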
 233/**
 234 * irq_domain_remove() - Remove an irq domain.
 235 * @domain: domain to remove
 236 *
 237 * This routine is used to remove an irq domain. The caller must ensure
 238 * that all mappings within the domain have been disposed of prior to
 239 * calling this function.
 240 */
 241void irq_domain_remove(struct irq_domain *domain)
 242{
 243        mutex_lock(&irq_domain_mutex);
 244        debugfs_remove_domain_dir(domain);
 245
 246        WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 247
 248        list_del(&domain->link);
 249
 250        /*
 251         * If the going away domain is the default one, reset it.
 252         */
 253        if (unlikely(irq_default_domain == domain))
 254                irq_set_default_host(NULL);
 255
 256        mutex_unlock(&irq_domain_mutex);
 257
 258        pr_debug("Removed domain %s\n", domain->name);
 259
 260        of_node_put(irq_domain_get_of_node(domain));
 261        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 262                kfree(domain->name);
 263        kfree(domain);
 264}
 265EXPORT_SYMBOL_GPL(irq_domain_remove);
 266
 267void irq_domain_update_bus_token(struct irq_domain *domain,
 268                                 enum irq_domain_bus_token bus_token)
 269{
 270        char *name;
 271
 272        if (domain->bus_token == bus_token)
 273                return;
 274
 275        mutex_lock(&irq_domain_mutex);
 276
 277        domain->bus_token = bus_token;
 278
 279        name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
 280        if (!name) {
 281                mutex_unlock(&irq_domain_mutex);
 282                return;
 283        }
 284
 285        debugfs_remove_domain_dir(domain);
 286
 287        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 288                kfree(domain->name);
 289        else
 290                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 291
 292        domain->name = name;
 293        debugfs_add_domain_dir(domain);
 294
 295        mutex_unlock(&irq_domain_mutex);
 296}
 297
 298/**
 299 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 300 * @of_node: pointer to interrupt controller's device tree node.
 301 * @size: total number of irqs in mapping
 302 * @first_irq: first number of irq block assigned to the domain,
 303 *      pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 304 *      pre-map all of the irqs in the domain to virqs starting at first_irq.
 305 * @ops: domain callbacks
 306 * @host_data: Controller private data pointer
 307 *
 308 * Allocates an irq_domain and, if first_irq is positive, also allocates
 309 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 310 *
 311 * This is intended to implement the expected behaviour for most
 312 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 313 * irqs get mapped dynamically on the fly. However, if the controller requires
 314 * static virq assignments (non-DT boot) then it will set that up correctly.
 315 */
 316struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 317                                         unsigned int size,
 318                                         unsigned int first_irq,
 319                                         const struct irq_domain_ops *ops,
 320                                         void *host_data)
 321{
 322        struct irq_domain *domain;
 323
 324        domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
 325        if (!domain)
 326                return NULL;
 327
 328        if (first_irq > 0) {
 329                if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
 330                        /* attempt to allocate irq_descs */
 331                        int rc = irq_alloc_descs(first_irq, first_irq, size,
 332                                                 of_node_to_nid(of_node));
 333                        if (rc < 0)
 334                                pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 335                                        first_irq);
 336                }
 337                irq_domain_associate_many(domain, first_irq, 0, size);
 338        }
 339
 340        return domain;
 341}
 342EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 343
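/*
 * Usage sketch (illustrative, not part of this file): the same call covers a
 * DT boot (first_irq == 0, virqs are mapped on demand) and a non-DT boot
 * with a fixed virq base. The size of 16, the base of 32 and foo_domain_ops
 * are assumptions for the example.
 */
static struct irq_domain *foo_init_domain(struct device_node *np)
{
	/* DT: map on the fly; otherwise pre-map 16 virqs starting at 32 */
	unsigned int first_irq = np ? 0 : 32;

	return irq_domain_add_simple(np, 16, first_irq, &foo_domain_ops, NULL);
}
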
 344/**
 345 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 346 * @of_node: pointer to interrupt controller's device tree node.
 347 * @size: total number of irqs in legacy mapping
 348 * @first_irq: first number of irq block assigned to the domain
 349 * @first_hwirq: first hwirq number to use for the translation. Should normally
 350 *               be '0', but a positive integer can be used if the effective
  351 *               hwirq numbering does not begin at zero.
 352 * @ops: map/unmap domain callbacks
 353 * @host_data: Controller private data pointer
 354 *
 355 * Note: the map() callback will be called before this function returns
 356 * for all legacy interrupts except 0 (which is always the invalid irq for
 357 * a legacy controller).
 358 */
 359struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 360                                         unsigned int size,
 361                                         unsigned int first_irq,
 362                                         irq_hw_number_t first_hwirq,
 363                                         const struct irq_domain_ops *ops,
 364                                         void *host_data)
 365{
 366        struct irq_domain *domain;
 367
 368        domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
 369                                  first_hwirq + size, 0, ops, host_data);
 370        if (domain)
 371                irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 372
 373        return domain;
 374}
 375EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 376
 377/**
 378 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 379 * @fwspec: FW specifier for an interrupt
 380 * @bus_token: bus token to match; DOMAIN_BUS_ANY matches any domain
 381 */
 382struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 383                                            enum irq_domain_bus_token bus_token)
 384{
 385        struct irq_domain *h, *found = NULL;
 386        struct fwnode_handle *fwnode = fwspec->fwnode;
 387        int rc;
 388
 389        /* We might want to match the legacy controller last since
 390         * it might potentially be set to match all interrupts in
 391         * the absence of a device node. This hasn't been a problem
 392         * so far though...
 393         *
 394         * bus_token == DOMAIN_BUS_ANY matches any domain, any other
 395         * values must generate an exact match for the domain to be
 396         * selected.
 397         */
 398        mutex_lock(&irq_domain_mutex);
 399        list_for_each_entry(h, &irq_domain_list, link) {
 400                if (h->ops->select && fwspec->param_count)
 401                        rc = h->ops->select(h, fwspec, bus_token);
 402                else if (h->ops->match)
 403                        rc = h->ops->match(h, to_of_node(fwnode), bus_token);
 404                else
 405                        rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
 406                              ((bus_token == DOMAIN_BUS_ANY) ||
 407                               (h->bus_token == bus_token)));
 408
 409                if (rc) {
 410                        found = h;
 411                        break;
 412                }
 413        }
 414        mutex_unlock(&irq_domain_mutex);
 415        return found;
 416}
 417EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
 418
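/*
 * Usage sketch (illustrative, not part of this file): locate the domain that
 * serves wired interrupts for a given firmware node, falling back to any bus
 * token, much as irq_create_fwspec_mapping() does further down.
 */
static struct irq_domain *foo_find_domain(struct fwnode_handle *fwnode)
{
	struct irq_fwspec fwspec = {
		.fwnode		= fwnode,
		.param_count	= 0,
	};
	struct irq_domain *domain;

	domain = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WIRED);
	if (!domain)
		domain = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);

	return domain;
}
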
 419/**
 420 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 421 * IRQ remapping
 422 *
 423 * Return: false if any MSI irq domain does not support IRQ remapping,
 424 * true otherwise (including if there is no MSI irq domain)
 425 */
 426bool irq_domain_check_msi_remap(void)
 427{
 428        struct irq_domain *h;
 429        bool ret = true;
 430
 431        mutex_lock(&irq_domain_mutex);
 432        list_for_each_entry(h, &irq_domain_list, link) {
 433                if (irq_domain_is_msi(h) &&
 434                    !irq_domain_hierarchical_is_msi_remap(h)) {
 435                        ret = false;
 436                        break;
 437                }
 438        }
 439        mutex_unlock(&irq_domain_mutex);
 440        return ret;
 441}
 442EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
 443
 444/**
 445 * irq_set_default_host() - Set a "default" irq domain
 446 * @domain: default domain pointer
 447 *
 448 * For convenience, it's possible to set a "default" domain that will be used
 449 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 450 * platforms that want to manipulate a few hard coded interrupt numbers that
 451 * aren't properly represented in the device-tree.
 452 */
 453void irq_set_default_host(struct irq_domain *domain)
 454{
 455        pr_debug("Default domain set to @0x%p\n", domain);
 456
 457        irq_default_domain = domain;
 458}
 459EXPORT_SYMBOL_GPL(irq_set_default_host);
 460
 461static void irq_domain_clear_mapping(struct irq_domain *domain,
 462                                     irq_hw_number_t hwirq)
 463{
 464        if (hwirq < domain->revmap_size) {
 465                domain->linear_revmap[hwirq] = 0;
 466        } else {
 467                mutex_lock(&domain->revmap_tree_mutex);
 468                radix_tree_delete(&domain->revmap_tree, hwirq);
 469                mutex_unlock(&domain->revmap_tree_mutex);
 470        }
 471}
 472
 473static void irq_domain_set_mapping(struct irq_domain *domain,
 474                                   irq_hw_number_t hwirq,
 475                                   struct irq_data *irq_data)
 476{
 477        if (hwirq < domain->revmap_size) {
 478                domain->linear_revmap[hwirq] = irq_data->irq;
 479        } else {
 480                mutex_lock(&domain->revmap_tree_mutex);
 481                radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
 482                mutex_unlock(&domain->revmap_tree_mutex);
 483        }
 484}
 485
 486void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 487{
 488        struct irq_data *irq_data = irq_get_irq_data(irq);
 489        irq_hw_number_t hwirq;
 490
 491        if (WARN(!irq_data || irq_data->domain != domain,
 492                 "virq%i doesn't exist; cannot disassociate\n", irq))
 493                return;
 494
 495        hwirq = irq_data->hwirq;
 496        irq_set_status_flags(irq, IRQ_NOREQUEST);
 497
 498        /* remove chip and handler */
 499        irq_set_chip_and_handler(irq, NULL, NULL);
 500
 501        /* Make sure it's completed */
 502        synchronize_irq(irq);
 503
 504        /* Tell the PIC about it */
 505        if (domain->ops->unmap)
 506                domain->ops->unmap(domain, irq);
 507        smp_mb();
 508
 509        irq_data->domain = NULL;
 510        irq_data->hwirq = 0;
 511        domain->mapcount--;
 512
 513        /* Clear reverse map for this hwirq */
 514        irq_domain_clear_mapping(domain, hwirq);
 515}
 516
 517int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 518                         irq_hw_number_t hwirq)
 519{
 520        struct irq_data *irq_data = irq_get_irq_data(virq);
 521        int ret;
 522
 523        if (WARN(hwirq >= domain->hwirq_max,
 524                 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
 525                return -EINVAL;
 526        if (WARN(!irq_data, "error: virq%i is not allocated", virq))
 527                return -EINVAL;
 528        if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
 529                return -EINVAL;
 530
 531        mutex_lock(&irq_domain_mutex);
 532        irq_data->hwirq = hwirq;
 533        irq_data->domain = domain;
 534        if (domain->ops->map) {
 535                ret = domain->ops->map(domain, virq, hwirq);
 536                if (ret != 0) {
 537                        /*
 538                         * If map() returns -EPERM, this interrupt is protected
 539                         * by the firmware or some other service and shall not
 540                         * be mapped. Don't bother telling the user about it.
 541                         */
 542                        if (ret != -EPERM) {
 543                                pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
 544                                       domain->name, hwirq, virq, ret);
 545                        }
 546                        irq_data->domain = NULL;
 547                        irq_data->hwirq = 0;
 548                        mutex_unlock(&irq_domain_mutex);
 549                        return ret;
 550                }
 551
 552                /* If not already assigned, give the domain the chip's name */
 553                if (!domain->name && irq_data->chip)
 554                        domain->name = irq_data->chip->name;
 555        }
 556
 557        domain->mapcount++;
 558        irq_domain_set_mapping(domain, hwirq, irq_data);
 559        mutex_unlock(&irq_domain_mutex);
 560
 561        irq_clear_status_flags(virq, IRQ_NOREQUEST);
 562
 563        return 0;
 564}
 565EXPORT_SYMBOL_GPL(irq_domain_associate);
 566
 567void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 568                               irq_hw_number_t hwirq_base, int count)
 569{
 570        struct device_node *of_node;
 571        int i;
 572
 573        of_node = irq_domain_get_of_node(domain);
 574        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
 575                of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
 576
 577        for (i = 0; i < count; i++) {
 578                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
 579        }
 580}
 581EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 582
 583/**
 584 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 585 * @domain: domain to allocate the irq for or NULL for default domain
 586 *
 587 * This routine is used for irq controllers which can choose the hardware
 588 * interrupt numbers they generate. In such a case it's simplest to use
 589 * the linux irq as the hardware interrupt number. It still uses the linear
 590 * or radix tree to store the mapping, but the irq controller can optimize
 591 * the revmap path by using the hwirq directly.
 592 */
 593unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 594{
 595        struct device_node *of_node;
 596        unsigned int virq;
 597
 598        if (domain == NULL)
 599                domain = irq_default_domain;
 600
 601        of_node = irq_domain_get_of_node(domain);
 602        virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
 603        if (!virq) {
 604                pr_debug("create_direct virq allocation failed\n");
 605                return 0;
 606        }
 607        if (virq >= domain->revmap_direct_max_irq) {
 608                pr_err("ERROR: no free irqs available below %i maximum\n",
 609                        domain->revmap_direct_max_irq);
 610                irq_free_desc(virq);
 611                return 0;
 612        }
 613        pr_debug("create_direct obtained virq %d\n", virq);
 614
 615        if (irq_domain_associate(domain, virq, virq)) {
 616                irq_free_desc(virq);
 617                return 0;
 618        }
 619
 620        return virq;
 621}
 622EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 623
 624/**
 625 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 626 * @domain: domain owning this hardware interrupt or NULL for default domain
 627 * @hwirq: hardware irq number in that domain space
 628 *
 629 * Only one mapping per hardware interrupt is permitted. Returns a linux
 630 * irq number.
 631 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 632 * called on the returned number.
 633 */
 634unsigned int irq_create_mapping(struct irq_domain *domain,
 635                                irq_hw_number_t hwirq)
 636{
 637        struct device_node *of_node;
 638        int virq;
 639
 640        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 641
 642        /* Look for default domain if necessary */
 643        if (domain == NULL)
 644                domain = irq_default_domain;
 645        if (domain == NULL) {
 646                WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
 647                return 0;
 648        }
 649        pr_debug("-> using domain @%p\n", domain);
 650
 651        of_node = irq_domain_get_of_node(domain);
 652
 653        /* Check if mapping already exists */
 654        virq = irq_find_mapping(domain, hwirq);
 655        if (virq) {
 656                pr_debug("-> existing mapping on virq %d\n", virq);
 657                return virq;
 658        }
 659
 660        /* Allocate a virtual interrupt number */
 661        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
 662        if (virq <= 0) {
 663                pr_debug("-> virq allocation failed\n");
 664                return 0;
 665        }
 666
 667        if (irq_domain_associate(domain, virq, hwirq)) {
 668                irq_free_desc(virq);
 669                return 0;
 670        }
 671
 672        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
 673                hwirq, of_node_full_name(of_node), virq);
 674
 675        return virq;
 676}
 677EXPORT_SYMBOL_GPL(irq_create_mapping);
 678
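/*
 * Usage sketch (illustrative, not part of this file): a controller whose
 * hwirq numbers are simply line offsets can hand out virqs lazily from a
 * ->to_irq()-style callback. struct foo_priv is a hypothetical driver
 * private structure.
 */
struct foo_priv {
	struct irq_domain *domain;
};

static int foo_line_to_irq(struct foo_priv *priv, unsigned int offset)
{
	/* Returns an existing mapping or creates a new one; 0 means failure */
	return irq_create_mapping(priv->domain, offset);
}
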
 679/**
 680 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 681 * @domain: domain owning the interrupt range
 682 * @irq_base: beginning of linux IRQ range
 683 * @hwirq_base: beginning of hardware IRQ range
 684 * @count: Number of interrupts to map
 685 *
 686 * This routine is used for allocating and mapping a range of hardware
 687 * irqs to linux irqs where the linux irq numbers are at pre-defined
 688 * locations. For use by controllers that already have static mappings
 689 * to insert in to the domain.
 690 *
 691 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 692 * domain insertion.
 693 *
 694 * 0 is returned upon success, while any failure to establish a static
 695 * mapping is treated as an error.
 696 */
 697int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 698                               irq_hw_number_t hwirq_base, int count)
 699{
 700        struct device_node *of_node;
 701        int ret;
 702
 703        of_node = irq_domain_get_of_node(domain);
 704        ret = irq_alloc_descs(irq_base, irq_base, count,
 705                              of_node_to_nid(of_node));
 706        if (unlikely(ret < 0))
 707                return ret;
 708
 709        irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 710        return 0;
 711}
 712EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
 713
 714static int irq_domain_translate(struct irq_domain *d,
 715                                struct irq_fwspec *fwspec,
 716                                irq_hw_number_t *hwirq, unsigned int *type)
 717{
 718#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 719        if (d->ops->translate)
 720                return d->ops->translate(d, fwspec, hwirq, type);
 721#endif
 722        if (d->ops->xlate)
 723                return d->ops->xlate(d, to_of_node(fwspec->fwnode),
 724                                     fwspec->param, fwspec->param_count,
 725                                     hwirq, type);
 726
 727        /* If domain has no translation, then we assume interrupt line */
 728        *hwirq = fwspec->param[0];
 729        return 0;
 730}
 731
 732static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 733                                      struct irq_fwspec *fwspec)
 734{
 735        int i;
 736
 737        fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
 738        fwspec->param_count = irq_data->args_count;
 739
 740        for (i = 0; i < irq_data->args_count; i++)
 741                fwspec->param[i] = irq_data->args[i];
 742}
 743
 744unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 745{
 746        struct irq_domain *domain;
 747        struct irq_data *irq_data;
 748        irq_hw_number_t hwirq;
 749        unsigned int type = IRQ_TYPE_NONE;
 750        int virq;
 751
 752        if (fwspec->fwnode) {
 753                domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
 754                if (!domain)
 755                        domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
 756        } else {
 757                domain = irq_default_domain;
 758        }
 759
 760        if (!domain) {
 761                pr_warn("no irq domain found for %s !\n",
 762                        of_node_full_name(to_of_node(fwspec->fwnode)));
 763                return 0;
 764        }
 765
 766        if (irq_domain_translate(domain, fwspec, &hwirq, &type))
 767                return 0;
 768
 769        /*
 770         * WARN if the irqchip returns a type with bits
 771         * outside the sense mask set and clear these bits.
 772         */
 773        if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
 774                type &= IRQ_TYPE_SENSE_MASK;
 775
 776        /*
 777         * If we've already configured this interrupt,
 778         * don't do it again, or hell will break loose.
 779         */
 780        virq = irq_find_mapping(domain, hwirq);
 781        if (virq) {
 782                /*
 783                 * If the trigger type is not specified or matches the
 784                 * current trigger type then we are done so return the
 785                 * interrupt number.
 786                 */
 787                if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
 788                        return virq;
 789
 790                /*
 791                 * If the trigger type has not been set yet, then set
 792                 * it now and return the interrupt number.
 793                 */
 794                if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
 795                        irq_data = irq_get_irq_data(virq);
 796                        if (!irq_data)
 797                                return 0;
 798
 799                        irqd_set_trigger_type(irq_data, type);
 800                        return virq;
 801                }
 802
 803                pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
 804                        hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
 805                return 0;
 806        }
 807
 808        if (irq_domain_is_hierarchy(domain)) {
 809                virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 810                if (virq <= 0)
 811                        return 0;
 812        } else {
 813                /* Create mapping */
 814                virq = irq_create_mapping(domain, hwirq);
 815                if (!virq)
 816                        return virq;
 817        }
 818
 819        irq_data = irq_get_irq_data(virq);
 820        if (!irq_data) {
 821                if (irq_domain_is_hierarchy(domain))
 822                        irq_domain_free_irqs(virq, 1);
 823                else
 824                        irq_dispose_mapping(virq);
 825                return 0;
 826        }
 827
 828        /* Store trigger type */
 829        irqd_set_trigger_type(irq_data, type);
 830
 831        return virq;
 832}
 833EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
 834
 835unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
 836{
 837        struct irq_fwspec fwspec;
 838
 839        of_phandle_args_to_fwspec(irq_data, &fwspec);
 840        return irq_create_fwspec_mapping(&fwspec);
 841}
 842EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 843
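/*
 * Usage sketch (illustrative, not part of this file): build a two-cell
 * specifier (hwirq number plus trigger type) by hand and let the core
 * translate and map it; this mirrors what irq_create_of_mapping() does for
 * DT phandle arguments.
 */
static unsigned int foo_map_wired_irq(struct fwnode_handle *fwnode,
				      u32 hwirq, u32 type)
{
	struct irq_fwspec fwspec = {
		.fwnode		= fwnode,
		.param_count	= 2,
		.param		= { hwirq, type },
	};

	return irq_create_fwspec_mapping(&fwspec);
}
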
 844/**
 845 * irq_dispose_mapping() - Unmap an interrupt
 846 * @virq: linux irq number of the interrupt to unmap
 847 */
 848void irq_dispose_mapping(unsigned int virq)
 849{
 850        struct irq_data *irq_data = irq_get_irq_data(virq);
 851        struct irq_domain *domain;
 852
 853        if (!virq || !irq_data)
 854                return;
 855
 856        domain = irq_data->domain;
 857        if (WARN_ON(domain == NULL))
 858                return;
 859
 860        if (irq_domain_is_hierarchy(domain)) {
 861                irq_domain_free_irqs(virq, 1);
 862        } else {
 863                irq_domain_disassociate(domain, virq);
 864                irq_free_desc(virq);
 865        }
 866}
 867EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 868
 869/**
 870 * irq_find_mapping() - Find a linux irq from a hw irq number.
 871 * @domain: domain owning this hardware interrupt
 872 * @hwirq: hardware irq number in that domain space
 873 */
 874unsigned int irq_find_mapping(struct irq_domain *domain,
 875                              irq_hw_number_t hwirq)
 876{
 877        struct irq_data *data;
 878
 879        /* Look for default domain if necessary */
 880        if (domain == NULL)
 881                domain = irq_default_domain;
 882        if (domain == NULL)
 883                return 0;
 884
 885        if (hwirq < domain->revmap_direct_max_irq) {
 886                data = irq_domain_get_irq_data(domain, hwirq);
 887                if (data && data->hwirq == hwirq)
 888                        return hwirq;
 889        }
 890
 891        /* Check if the hwirq is in the linear revmap. */
 892        if (hwirq < domain->revmap_size)
 893                return domain->linear_revmap[hwirq];
 894
 895        rcu_read_lock();
 896        data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 897        rcu_read_unlock();
 898        return data ? data->irq : 0;
 899}
 900EXPORT_SYMBOL_GPL(irq_find_mapping);
 901
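/*
 * Usage sketch (illustrative, not part of this file): a cascaded interrupt
 * handler typically reads a pending bitmask from its hardware (already done
 * by the caller here) and dispatches every set bit through the reverse map.
 */
static void foo_handle_cascade(struct irq_domain *domain, unsigned long pending)
{
	unsigned int hwirq, virq;

	for (hwirq = 0; pending; hwirq++, pending >>= 1) {
		if (!(pending & 1))
			continue;

		virq = irq_find_mapping(domain, hwirq);
		if (virq)
			generic_handle_irq(virq);
	}
}
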
 902/**
 903 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 904 *
 905 * Device Tree IRQ specifier translation function which works with one cell
 906 * bindings where the cell value maps directly to the hwirq number.
 907 */
 908int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
 909                             const u32 *intspec, unsigned int intsize,
 910                             unsigned long *out_hwirq, unsigned int *out_type)
 911{
 912        if (WARN_ON(intsize < 1))
 913                return -EINVAL;
 914        *out_hwirq = intspec[0];
 915        *out_type = IRQ_TYPE_NONE;
 916        return 0;
 917}
 918EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
 919
 920/**
 921 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 922 *
 923 * Device Tree IRQ specifier translation function which works with two cell
 924 * bindings where the cell values map directly to the hwirq number
 925 * and linux irq flags.
 926 */
 927int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
 928                        const u32 *intspec, unsigned int intsize,
 929                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
 930{
 931        if (WARN_ON(intsize < 2))
 932                return -EINVAL;
 933        *out_hwirq = intspec[0];
 934        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
 935        return 0;
 936}
 937EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
 938
 939/**
 940 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 941 *
 942 * Device Tree IRQ specifier translation function which works with either one
 943 * or two cell bindings where the cell values map directly to the hwirq number
 944 * and linux irq flags.
 945 *
 946 * Note: don't use this function unless your interrupt controller explicitly
 947 * supports both one and two cell bindings.  For the majority of controllers
 948 * the _onecell() or _twocell() variants above should be used.
 949 */
 950int irq_domain_xlate_onetwocell(struct irq_domain *d,
 951                                struct device_node *ctrlr,
 952                                const u32 *intspec, unsigned int intsize,
 953                                unsigned long *out_hwirq, unsigned int *out_type)
 954{
 955        if (WARN_ON(intsize < 1))
 956                return -EINVAL;
 957        *out_hwirq = intspec[0];
 958        if (intsize > 1)
 959                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
 960        else
 961                *out_type = IRQ_TYPE_NONE;
 962        return 0;
 963}
 964EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
 965
 966const struct irq_domain_ops irq_domain_simple_ops = {
 967        .xlate = irq_domain_xlate_onetwocell,
 968};
 969EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 970
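/*
 * Usage sketch (illustrative, not part of this file): a controller whose DT
 * specifier is <hwirq flags>, e.g. interrupts = <5 IRQ_TYPE_LEVEL_HIGH>, can
 * simply plug in the two-cell translator; foo_domain_map is assumed to be
 * the driver's .map callback.
 */
static const struct irq_domain_ops foo_twocell_ops = {
	.map	= foo_domain_map,
	.xlate	= irq_domain_xlate_twocell,
};
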
 971int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
 972                           int node, const struct cpumask *affinity)
 973{
 974        unsigned int hint;
 975
 976        if (virq >= 0) {
 977                virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
 978                                         affinity);
 979        } else {
 980                hint = hwirq % nr_irqs;
 981                if (hint == 0)
 982                        hint++;
 983                virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
 984                                         affinity);
 985                if (virq <= 0 && hint > 1) {
 986                        virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
 987                                                 affinity);
 988                }
 989        }
 990
 991        return virq;
 992}
 993
 994#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
 995/**
 996 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
 997 * @parent:     Parent irq domain to associate with the new domain
 998 * @flags:      Irq domain flags associated to the domain
 999 * @size:       Size of the domain. See below
1000 * @fwnode:     Optional fwnode of the interrupt controller
1001 * @ops:        Pointer to the interrupt domain callbacks
1002 * @host_data:  Controller private data pointer
1003 *
1004 * If @size is 0 a tree domain is created, otherwise a linear domain.
1005 *
1006 * If successful the parent is associated to the new domain and the
1007 * domain flags are set.
1008 * Returns pointer to IRQ domain, or NULL on failure.
1009 */
1010struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
1011                                            unsigned int flags,
1012                                            unsigned int size,
1013                                            struct fwnode_handle *fwnode,
1014                                            const struct irq_domain_ops *ops,
1015                                            void *host_data)
1016{
1017        struct irq_domain *domain;
1018
1019        if (size)
1020                domain = irq_domain_create_linear(fwnode, size, ops, host_data);
1021        else
1022                domain = irq_domain_create_tree(fwnode, ops, host_data);
1023        if (domain) {
1024                domain->parent = parent;
1025                domain->flags |= flags;
1026        }
1027
1028        return domain;
1029}
1030EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
1031
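/*
 * Usage sketch (illustrative, not part of this file): a stacked domain's
 * .alloc typically lets the parent domain allocate first and then installs
 * its own irq_chip for every interrupt; .free can fall back to the common
 * helper. foo_hier_chip is hypothetical, @arg is forwarded to the parent
 * unchanged, and the hwirq would normally be derived from @arg.
 */
static struct irq_chip foo_hier_chip = {
	.name		= "foo-hier",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
};

static int foo_hier_alloc(struct irq_domain *domain, unsigned int virq,
			  unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = 0;	/* normally decoded from @arg */
	unsigned int i;
	int ret;

	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &foo_hier_chip,
					      domain->host_data);
	return 0;
}

static const struct irq_domain_ops foo_hier_ops = {
	.alloc	= foo_hier_alloc,
	.free	= irq_domain_free_irqs_common,
};

static struct irq_domain *foo_create_child(struct irq_domain *parent,
					   struct fwnode_handle *fwnode)
{
	/* size == 0: the child uses a radix tree revmap */
	return irq_domain_create_hierarchy(parent, 0, 0, fwnode,
					   &foo_hier_ops, NULL);
}
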
1032static void irq_domain_insert_irq(int virq)
1033{
1034        struct irq_data *data;
1035
1036        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1037                struct irq_domain *domain = data->domain;
1038
1039                domain->mapcount++;
1040                irq_domain_set_mapping(domain, data->hwirq, data);
1041
1042                /* If not already assigned, give the domain the chip's name */
1043                if (!domain->name && data->chip)
1044                        domain->name = data->chip->name;
1045        }
1046
1047        irq_clear_status_flags(virq, IRQ_NOREQUEST);
1048}
1049
1050static void irq_domain_remove_irq(int virq)
1051{
1052        struct irq_data *data;
1053
1054        irq_set_status_flags(virq, IRQ_NOREQUEST);
1055        irq_set_chip_and_handler(virq, NULL, NULL);
1056        synchronize_irq(virq);
1057        smp_mb();
1058
1059        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1060                struct irq_domain *domain = data->domain;
1061                irq_hw_number_t hwirq = data->hwirq;
1062
1063                domain->mapcount--;
1064                irq_domain_clear_mapping(domain, hwirq);
1065        }
1066}
1067
1068static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
1069                                                   struct irq_data *child)
1070{
1071        struct irq_data *irq_data;
1072
1073        irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
1074                                irq_data_get_node(child));
1075        if (irq_data) {
1076                child->parent_data = irq_data;
1077                irq_data->irq = child->irq;
1078                irq_data->common = child->common;
1079                irq_data->domain = domain;
1080        }
1081
1082        return irq_data;
1083}
1084
1085static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
1086{
1087        struct irq_data *irq_data, *tmp;
1088        int i;
1089
1090        for (i = 0; i < nr_irqs; i++) {
1091                irq_data = irq_get_irq_data(virq + i);
1092                tmp = irq_data->parent_data;
1093                irq_data->parent_data = NULL;
1094                irq_data->domain = NULL;
1095
1096                while (tmp) {
1097                        irq_data = tmp;
1098                        tmp = tmp->parent_data;
1099                        kfree(irq_data);
1100                }
1101        }
1102}
1103
1104static int irq_domain_alloc_irq_data(struct irq_domain *domain,
1105                                     unsigned int virq, unsigned int nr_irqs)
1106{
1107        struct irq_data *irq_data;
1108        struct irq_domain *parent;
1109        int i;
1110
1111        /* The outermost irq_data is embedded in struct irq_desc */
1112        for (i = 0; i < nr_irqs; i++) {
1113                irq_data = irq_get_irq_data(virq + i);
1114                irq_data->domain = domain;
1115
1116                for (parent = domain->parent; parent; parent = parent->parent) {
1117                        irq_data = irq_domain_insert_irq_data(parent, irq_data);
1118                        if (!irq_data) {
1119                                irq_domain_free_irq_data(virq, i + 1);
1120                                return -ENOMEM;
1121                        }
1122                }
1123        }
1124
1125        return 0;
1126}
1127
1128/**
1129 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1130 * @domain:     domain to match
1131 * @virq:       IRQ number to get irq_data
1132 */
1133struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1134                                         unsigned int virq)
1135{
1136        struct irq_data *irq_data;
1137
1138        for (irq_data = irq_get_irq_data(virq); irq_data;
1139             irq_data = irq_data->parent_data)
1140                if (irq_data->domain == domain)
1141                        return irq_data;
1142
1143        return NULL;
1144}
1145EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1146
1147/**
1148 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1149 * @domain:     Interrupt domain to match
1150 * @virq:       IRQ number
1151 * @hwirq:      The hwirq number
1152 * @chip:       The associated interrupt chip
1153 * @chip_data:  The associated chip data
1154 */
1155int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1156                                  irq_hw_number_t hwirq, struct irq_chip *chip,
1157                                  void *chip_data)
1158{
1159        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1160
1161        if (!irq_data)
1162                return -ENOENT;
1163
1164        irq_data->hwirq = hwirq;
1165        irq_data->chip = chip ? chip : &no_irq_chip;
1166        irq_data->chip_data = chip_data;
1167
1168        return 0;
1169}
1170EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
1171
1172/**
1173 * irq_domain_set_info - Set the complete data for a @virq in @domain
1174 * @domain:             Interrupt domain to match
1175 * @virq:               IRQ number
1176 * @hwirq:              The hardware interrupt number
1177 * @chip:               The associated interrupt chip
1178 * @chip_data:          The associated interrupt chip data
1179 * @handler:            The interrupt flow handler
1180 * @handler_data:       The interrupt flow handler data
1181 * @handler_name:       The interrupt handler name
1182 */
1183void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1184                         irq_hw_number_t hwirq, struct irq_chip *chip,
1185                         void *chip_data, irq_flow_handler_t handler,
1186                         void *handler_data, const char *handler_name)
1187{
1188        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1189        __irq_set_handler(virq, handler, 0, handler_name);
1190        irq_set_handler_data(virq, handler_data);
1191}
1192EXPORT_SYMBOL(irq_domain_set_info);
1193
1194/**
1195 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
1196 * @irq_data:   The pointer to irq_data
1197 */
1198void irq_domain_reset_irq_data(struct irq_data *irq_data)
1199{
1200        irq_data->hwirq = 0;
1201        irq_data->chip = &no_irq_chip;
1202        irq_data->chip_data = NULL;
1203}
1204EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
1205
1206/**
1207 * irq_domain_free_irqs_common - Clear irq_data and free the parent
1208 * @domain:     Interrupt domain to match
1209 * @virq:       IRQ number to start with
1210 * @nr_irqs:    The number of irqs to free
1211 */
1212void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1213                                 unsigned int nr_irqs)
1214{
1215        struct irq_data *irq_data;
1216        int i;
1217
1218        for (i = 0; i < nr_irqs; i++) {
1219                irq_data = irq_domain_get_irq_data(domain, virq + i);
1220                if (irq_data)
1221                        irq_domain_reset_irq_data(irq_data);
1222        }
1223        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1224}
1225EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
1226
1227/**
1228 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
1229 * @domain:     Interrupt domain to match
1230 * @virq:       IRQ number to start with
1231 * @nr_irqs:    The number of irqs to free
1232 */
1233void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1234                              unsigned int nr_irqs)
1235{
1236        int i;
1237
1238        for (i = 0; i < nr_irqs; i++) {
1239                irq_set_handler_data(virq + i, NULL);
1240                irq_set_handler(virq + i, NULL);
1241        }
1242        irq_domain_free_irqs_common(domain, virq, nr_irqs);
1243}
1244
1245static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
1246                                           unsigned int irq_base,
1247                                           unsigned int nr_irqs)
1248{
1249        if (domain->ops->free)
1250                domain->ops->free(domain, irq_base, nr_irqs);
1251}
1252
1253int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
1254                                    unsigned int irq_base,
1255                                    unsigned int nr_irqs, void *arg)
1256{
1257        return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
1258}
1259
1260/**
1261 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1262 * @domain:     domain to allocate from
1263 * @irq_base:   allocate specified IRQ number if irq_base >= 0
1264 * @nr_irqs:    number of IRQs to allocate
1265 * @node:       NUMA node id for memory allocation
1266 * @arg:        domain specific argument
1267 * @realloc:    IRQ descriptors have already been allocated if true
1268 * @affinity:   Optional irq affinity mask for multiqueue devices
1269 *
1270 * Allocate IRQ numbers and initialize all data structures to support
1271 * hierarchy IRQ domains.
1272 * Parameter @realloc is mainly to support legacy IRQs.
1273 * Returns error code or allocated IRQ number.
1274 *
1275 * The whole process of setting up an IRQ has been split into two steps.
1276 * The first step, __irq_domain_alloc_irqs(), allocates the IRQ
1277 * descriptors and the required hardware resources. The second step,
1278 * irq_domain_activate_irq(), programs the hardware with the preallocated
1279 * resources. In this way, it's easier to roll back when resource
1280 * allocation fails.
1281 */
1282int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1283                            unsigned int nr_irqs, int node, void *arg,
1284                            bool realloc, const struct cpumask *affinity)
1285{
1286        int i, ret, virq;
1287
1288        if (domain == NULL) {
1289                domain = irq_default_domain;
1290                if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1291                        return -EINVAL;
1292        }
1293
1294        if (!domain->ops->alloc) {
1295                pr_debug("domain->ops->alloc() is NULL\n");
1296                return -ENOSYS;
1297        }
1298
1299        if (realloc && irq_base >= 0) {
1300                virq = irq_base;
1301        } else {
1302                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1303                                              affinity);
1304                if (virq < 0) {
1305                        pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1306                                 irq_base, nr_irqs);
1307                        return virq;
1308                }
1309        }
1310
1311        if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1312                pr_debug("cannot allocate memory for IRQ%d\n", virq);
1313                ret = -ENOMEM;
1314                goto out_free_desc;
1315        }
1316
1317        mutex_lock(&irq_domain_mutex);
1318        ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
1319        if (ret < 0) {
1320                mutex_unlock(&irq_domain_mutex);
1321                goto out_free_irq_data;
1322        }
1323        for (i = 0; i < nr_irqs; i++)
1324                irq_domain_insert_irq(virq + i);
1325        mutex_unlock(&irq_domain_mutex);
1326
1327        return virq;
1328
1329out_free_irq_data:
1330        irq_domain_free_irq_data(virq, nr_irqs);
1331out_free_desc:
1332        irq_free_descs(virq, nr_irqs);
1333        return ret;
1334}
1335
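/*
 * Usage sketch (illustrative, not part of this file): a consumer allocates
 * virqs from a hierarchical domain with a domain-specific argument (an
 * irq_fwspec here) and frees them again with irq_domain_free_irqs().
 * irq_domain_alloc_irqs() is the <linux/irqdomain.h> wrapper around
 * __irq_domain_alloc_irqs().
 */
static int foo_alloc_one(struct irq_domain *domain, struct irq_fwspec *fwspec)
{
	/* Returns the allocated virq, or a negative error code */
	return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
}

static void foo_free_one(int virq)
{
	irq_domain_free_irqs(virq, 1);
}
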
1336/* The irq_data was moved, fix the revmap to refer to the new location */
1337static void irq_domain_fix_revmap(struct irq_data *d)
1338{
1339        void __rcu **slot;
1340
1341        if (d->hwirq < d->domain->revmap_size)
1342                return; /* Not using radix tree. */
1343
1344        /* Fix up the revmap. */
1345        mutex_lock(&d->domain->revmap_tree_mutex);
1346        slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1347        if (slot)
1348                radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
1349        mutex_unlock(&d->domain->revmap_tree_mutex);
1350}
1351
1352/**
1353 * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
1354 * @domain:     Domain to push.
1355 * @virq:       Irq to push the domain into.
1356 * @arg:        Passed to the irq_domain_ops alloc() function.
1357 *
1358 * For an already existing irqdomain hierarchy, as might be obtained
1359 * via a call to pci_enable_msix(), add an additional domain to the
1360 * head of the processing chain.  Must be called before request_irq()
1361 * has been called.
1362 */
1363int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1364{
1365        struct irq_data *child_irq_data;
1366        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1367        struct irq_desc *desc;
1368        int rv = 0;
1369
1370        /*
1371         * Check that no action has been set, which indicates the virq
1372         * is in a state where this function doesn't have to deal with
1373         * races between interrupt handling and maintaining the
1374         * hierarchy.  This will catch gross misuse.  Attempting to
1375         * make the check race free would require holding locks across
1376         * calls to struct irq_domain_ops->alloc(), which could lead
1377         * to deadlock, so we just do a simple check before starting.
1378         */
1379        desc = irq_to_desc(virq);
1380        if (!desc)
1381                return -EINVAL;
1382        if (WARN_ON(desc->action))
1383                return -EBUSY;
1384
1385        if (domain == NULL)
1386                return -EINVAL;
1387
1388        if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1389                return -EINVAL;
1390
1391        if (!root_irq_data)
1392                return -EINVAL;
1393
1394        if (domain->parent != root_irq_data->domain)
1395                return -EINVAL;
1396
1397        child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1398                                      irq_data_get_node(root_irq_data));
1399        if (!child_irq_data)
1400                return -ENOMEM;
1401
1402        mutex_lock(&irq_domain_mutex);
1403
1404        /* Copy the original irq_data. */
1405        *child_irq_data = *root_irq_data;
1406
1407        /*
1408         * Overwrite the root_irq_data, which is embedded in struct
1409         * irq_desc, with values for this domain.
1410         */
1411        root_irq_data->parent_data = child_irq_data;
1412        root_irq_data->domain = domain;
1413        root_irq_data->mask = 0;
1414        root_irq_data->hwirq = 0;
1415        root_irq_data->chip = NULL;
1416        root_irq_data->chip_data = NULL;
1417
1418        /* May (probably does) set hwirq, chip, etc. */
1419        rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1420        if (rv) {
1421                /* Restore the original irq_data. */
1422                *root_irq_data = *child_irq_data;
1423                goto error;
1424        }
1425
1426        irq_domain_fix_revmap(child_irq_data);
1427        irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1428
1429error:
1430        mutex_unlock(&irq_domain_mutex);
1431
1432        return rv;
1433}
1434EXPORT_SYMBOL_GPL(irq_domain_push_irq);
1435
1436/**
1437 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1438 * @domain:     Domain to remove.
1439 * @virq:       Irq to remove the domain from.
1440 *
1441 * Undo the effects of a call to irq_domain_push_irq().  Must be
1442 * called either before request_irq() or after free_irq().
1443 */
1444int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1445{
1446        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1447        struct irq_data *child_irq_data;
1448        struct irq_data *tmp_irq_data;
1449        struct irq_desc *desc;
1450
1451        /*
1452         * Check that no action is set, which indicates the virq is in
1453         * a state where this function doesn't have to deal with races
1454         * between interrupt handling and maintaining the hierarchy.
1455         * This will catch gross misuse.  Attempting to make the check
1456         * race free would require holding locks across calls to
1457         * struct irq_domain_ops->free(), which could lead to
1458         * deadlock, so we just do a simple check before starting.
1459         */
1460        desc = irq_to_desc(virq);
1461        if (!desc)
1462                return -EINVAL;
1463        if (WARN_ON(desc->action))
1464                return -EBUSY;
1465
1466        if (domain == NULL)
1467                return -EINVAL;
1468
1469        if (!root_irq_data)
1470                return -EINVAL;
1471
1472        tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1473
1474        /* We can only "pop" if this domain is at the top of the list */
1475        if (WARN_ON(root_irq_data != tmp_irq_data))
1476                return -EINVAL;
1477
1478        if (WARN_ON(root_irq_data->domain != domain))
1479                return -EINVAL;
1480
1481        child_irq_data = root_irq_data->parent_data;
1482        if (WARN_ON(!child_irq_data))
1483                return -EINVAL;
1484
1485        mutex_lock(&irq_domain_mutex);
1486
1487        root_irq_data->parent_data = NULL;
1488
1489        irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1490        irq_domain_free_irqs_hierarchy(domain, virq, 1);
1491
1492        /* Restore the original irq_data. */
1493        *root_irq_data = *child_irq_data;
1494
1495        irq_domain_fix_revmap(root_irq_data);
1496
1497        mutex_unlock(&irq_domain_mutex);
1498
1499        kfree(child_irq_data);
1500
1501        return 0;
1502}
1503EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
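
    /*
     * Usage sketch (hypothetical names, not taken from an in-tree driver):
     * a driver that wants to interpose an extra domain on a virq that
     * already has a mapping pushes the domain before requesting the
     * interrupt and pops it again after freeing it:
     *
     *	err = irq_domain_push_irq(child_domain, virq, &alloc_arg);
     *	if (err)
     *		return err;
     *	err = request_irq(virq, my_handler, 0, "my-dev", dev);
     *	...
     *	free_irq(virq, dev);
     *	irq_domain_pop_irq(child_domain, virq);
     *
     * child_domain must have been created with the virq's current domain as
     * its parent, and alloc_arg is whatever child_domain's ->alloc() expects.
     */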
1504
1505/**
1506 * irq_domain_free_irqs - Free IRQ number and associated data structures
1507 * @virq:       base IRQ number
1508 * @nr_irqs:    number of IRQs to free
1509 */
1510void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1511{
1512        struct irq_data *data = irq_get_irq_data(virq);
1513        int i;
1514
1515        if (WARN(!data || !data->domain || !data->domain->ops->free,
1516                 "NULL pointer, cannot free irq\n"))
1517                return;
1518
1519        mutex_lock(&irq_domain_mutex);
1520        for (i = 0; i < nr_irqs; i++)
1521                irq_domain_remove_irq(virq + i);
1522        irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
1523        mutex_unlock(&irq_domain_mutex);
1524
1525        irq_domain_free_irq_data(virq, nr_irqs);
1526        irq_free_descs(virq, nr_irqs);
1527}
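
    /*
     * Typical pairing (sketch, hypothetical allocation argument): interrupts
     * allocated through the hierarchy are released with the same base/count:
     *
     *	virq = irq_domain_alloc_irqs(domain, 4, NUMA_NO_NODE, &fwspec);
     *	if (virq < 0)
     *		return virq;
     *	...
     *	irq_domain_free_irqs(virq, 4);
     */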
1528
1529/**
1530 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
     * @domain:     Domain whose parent is used for the allocation
1531 * @irq_base:   Base IRQ number
1532 * @nr_irqs:    Number of IRQs to allocate
1533 * @arg:        Allocation data (arch/domain specific)
1534 *
1535 * Check whether the domain has a parent domain. If so, forward the
1536 * allocation to the parent domain; otherwise fail with -ENOSYS.
1537 */
1538int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1539                                 unsigned int irq_base, unsigned int nr_irqs,
1540                                 void *arg)
1541{
1542        if (!domain->parent)
1543                return -ENOSYS;
1544
1545        return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1546                                               nr_irqs, arg);
1547}
1548EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
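
    /*
     * A typical use (sketch, hypothetical names): a stacked domain's ->alloc()
     * callback lets the parent domain allocate its resources first and then
     * fills in the irq_data for its own level.  Here the argument is assumed
     * to be an irq_fwspec whose first cell is the hardware interrupt number
     * and which the parent domain understands unchanged:
     *
     *	static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
     *				   unsigned int nr_irqs, void *arg)
     *	{
     *		struct irq_fwspec *fwspec = arg;
     *		irq_hw_number_t hwirq = fwspec->param[0];
     *		int i, ret;
     *
     *		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
     *		if (ret)
     *			return ret;
     *
     *		for (i = 0; i < nr_irqs; i++)
     *			irq_domain_set_hwirq_and_chip(domain, virq + i,
     *						      hwirq + i, &my_chip, NULL);
     *		return 0;
     *	}
     */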
1549
1550/**
1551 * irq_domain_free_irqs_parent - Free interrupts from parent domain
     * @domain:     Domain whose parent is used for the free operation
1552 * @irq_base:   Base IRQ number
1553 * @nr_irqs:    Number of IRQs to free
1554 *
1555 * Check whether the domain has a parent domain. If so, forward the
1556 * free operation to the parent domain; otherwise do nothing.
1557 */
1558void irq_domain_free_irqs_parent(struct irq_domain *domain,
1559                                 unsigned int irq_base, unsigned int nr_irqs)
1560{
1561        if (!domain->parent)
1562                return;
1563
1564        irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
1565}
1566EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
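
    /*
     * The matching ->free() callback (sketch, hypothetical name) usually just
     * resets its own per-irq state and hands the range back to the parent:
     *
     *	static void my_domain_free(struct irq_domain *domain, unsigned int virq,
     *				   unsigned int nr_irqs)
     *	{
     *		/* undo my_domain_alloc()'s own state here, then ... */
     *		irq_domain_free_irqs_parent(domain, virq, nr_irqs);
     *	}
     *
     * irq_domain_free_irqs_common() provides this reset-and-forward pattern
     * for the common case.
     */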
1567
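    /*
     * Deactivation runs from the outermost irq_data towards the root domain:
     * each level's ->deactivate() is called before recursing into its parent,
     * the reverse of the activation order below.
     */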
1568static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1569{
1570        if (irq_data && irq_data->domain) {
1571                struct irq_domain *domain = irq_data->domain;
1572
1573                if (domain->ops->deactivate)
1574                        domain->ops->deactivate(domain, irq_data);
1575                if (irq_data->parent_data)
1576                        __irq_domain_deactivate_irq(irq_data->parent_data);
1577        }
1578}
1579
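    /*
     * Activation runs in the opposite direction: the parent levels are
     * activated before the domain itself, so that by the time a level
     * programs its hardware everything closer to the CPU is already set up.
     * If one level fails, the parents that were already activated are rolled
     * back via __irq_domain_deactivate_irq().
     */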
1580static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
1581{
1582        int ret = 0;
1583
1584        if (irqd && irqd->domain) {
1585                struct irq_domain *domain = irqd->domain;
1586
1587                if (irqd->parent_data)
1588                        ret = __irq_domain_activate_irq(irqd->parent_data,
1589                                                        reserve);
1590                if (!ret && domain->ops->activate) {
1591                        ret = domain->ops->activate(domain, irqd, reserve);
1592                        /* Rollback in case of error */
1593                        if (ret && irqd->parent_data)
1594                                __irq_domain_deactivate_irq(irqd->parent_data);
1595                }
1596        }
1597        return ret;
1598}
1599
1600/**
1601 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1602 *                           interrupt
1603 * @irq_data:   Outermost irq_data associated with interrupt
1604 * @reserve:    If set only reserve an interrupt vector instead of assigning one
1605 *
1606 * This is the second step of interrupt setup: call domain_ops->activate to
1607 * program the interrupt controllers, so the interrupt can actually be delivered.
1608 */
1609int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
1610{
1611        int ret = 0;
1612
1613        if (!irqd_is_activated(irq_data))
1614                ret = __irq_domain_activate_irq(irq_data, reserve);
1615        if (!ret)
1616                irqd_set_activated(irq_data);
1617        return ret;
1618}
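
    /*
     * irq_domain_activate_irq() is normally invoked by the irq core when an
     * interrupt is started up (or at allocation time for early activation);
     * with @reserve set, callers such as the MSI code only ask the hierarchy
     * to reserve resources (e.g. a CPU vector) without enabling delivery yet.
     */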
1619
1620/**
1621 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1622 *                             deactivate interrupt
1623 * @irq_data: outermost irq_data associated with interrupt
1624 *
1625 * It calls domain_ops->deactivate to program interrupt controllers to disable
1626 * interrupt delivery.
1627 */
1628void irq_domain_deactivate_irq(struct irq_data *irq_data)
1629{
1630        if (irqd_is_activated(irq_data)) {
1631                __irq_domain_deactivate_irq(irq_data);
1632                irqd_clr_activated(irq_data);
1633        }
1634}
1635
1636static void irq_domain_check_hierarchy(struct irq_domain *domain)
1637{
1638        /* Hierarchical irq domains must implement the ->alloc() callback */
1639        if (domain->ops->alloc)
1640                domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1641}
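
    /*
     * For example (hypothetical names), a domain registered with
     *
     *	static const struct irq_domain_ops my_hier_ops = {
     *		.alloc	= my_domain_alloc,
     *		.free	= my_domain_free,
     *	};
     *
     * is flagged with IRQ_DOMAIN_FLAG_HIERARCHY here, while a legacy domain
     * providing only .map()/.unmap() is not.
     */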
1642
1643/**
1644 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1645 * parent has MSI remapping support
1646 * @domain: domain pointer
1647 */
1648bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1649{
1650        for (; domain; domain = domain->parent) {
1651                if (irq_domain_is_msi_remap(domain))
1652                        return true;
1653        }
1654        return false;
1655}
1656#else   /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1657/**
1658 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1659 * @domain:     domain to match
1660 * @virq:       IRQ number to get irq_data
1661 */
1662struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1663                                         unsigned int virq)
1664{
1665        struct irq_data *irq_data = irq_get_irq_data(virq);
1666
1667        return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1668}
1669EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1670
1671/**
1672 * irq_domain_set_info - Set the complete data for a @virq in @domain
1673 * @domain:             Interrupt domain to match
1674 * @virq:               IRQ number
1675 * @hwirq:              The hardware interrupt number
1676 * @chip:               The associated interrupt chip
1677 * @chip_data:          The associated interrupt chip data
1678 * @handler:            The interrupt flow handler
1679 * @handler_data:       The interrupt flow handler data
1680 * @handler_name:       The interrupt handler name
1681 */
1682void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1683                         irq_hw_number_t hwirq, struct irq_chip *chip,
1684                         void *chip_data, irq_flow_handler_t handler,
1685                         void *handler_data, const char *handler_name)
1686{
1687        irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1688        irq_set_chip_data(virq, chip_data);
1689        irq_set_handler_data(virq, handler_data);
1690}
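
    /*
     * Sketch (hypothetical names) of a ->map() callback using the helper
     * above to wire up chip, chip data and flow handler for a new mapping:
     *
     *	static int my_domain_map(struct irq_domain *d, unsigned int virq,
     *				 irq_hw_number_t hwirq)
     *	{
     *		irq_domain_set_info(d, virq, hwirq, &my_chip, NULL,
     *				    handle_level_irq, NULL, NULL);
     *		return 0;
     *	}
     */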
1691
1692static void irq_domain_check_hierarchy(struct irq_domain *domain)
1693{
1694}
1695#endif  /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1696
1697#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1698static struct dentry *domain_dir;
1699
1700static void
1701irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1702{
1703        seq_printf(m, "%*sname:   %s\n", ind, "", d->name);
1704        seq_printf(m, "%*ssize:   %u\n", ind + 1, "",
1705                   d->revmap_size + d->revmap_direct_max_irq);
1706        seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
1707        seq_printf(m, "%*sflags:  0x%08x\n", ind + 1, "", d->flags);
1708        if (d->ops && d->ops->debug_show)
1709                d->ops->debug_show(m, d, NULL, ind + 1);
1710#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1711        if (!d->parent)
1712                return;
1713        seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1714        irq_domain_debug_show_one(m, d->parent, ind + 4);
1715#endif
1716}
1717
1718static int irq_domain_debug_show(struct seq_file *m, void *p)
1719{
1720        struct irq_domain *d = m->private;
1721
1722        /* Default domain? Might be NULL */
1723        if (!d) {
1724                if (!irq_default_domain)
1725                        return 0;
1726                d = irq_default_domain;
1727        }
1728        irq_domain_debug_show_one(m, d, 0);
1729        return 0;
1730}
1731DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
1732
1733static void debugfs_add_domain_dir(struct irq_domain *d)
1734{
1735        if (!d->name || !domain_dir || d->debugfs_file)
1736                return;
1737        d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
1738                                              &irq_domain_debug_fops);
1739}
1740
1741static void debugfs_remove_domain_dir(struct irq_domain *d)
1742{
1743        debugfs_remove(d->debugfs_file);
1744}
1745
1746void __init irq_domain_debugfs_init(struct dentry *root)
1747{
1748        struct irq_domain *d;
1749
1750        domain_dir = debugfs_create_dir("domains", root);
1751        if (!domain_dir)
1752                return;
1753
1754        debugfs_create_file("default", 0444, domain_dir, NULL,
1755                            &irq_domain_debug_fops);
1756        mutex_lock(&irq_domain_mutex);
1757        list_for_each_entry(d, &irq_domain_list, link)
1758                debugfs_add_domain_dir(d);
1759        mutex_unlock(&irq_domain_mutex);
1760}
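
    /*
     * The directory created above ends up under the irq debugfs root
     * (typically /sys/kernel/debug/irq/domains/) with one read-only file per
     * named domain plus "default" for irq_default_domain; each file dumps the
     * domain's name, size, mapping count, flags and parent chain.
     */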
1761#endif
1762