linux/kernel/irq/irqdomain.c
   1#define pr_fmt(fmt)  "irq: " fmt
   2
   3#include <linux/acpi.h>
   4#include <linux/debugfs.h>
   5#include <linux/hardirq.h>
   6#include <linux/interrupt.h>
   7#include <linux/irq.h>
   8#include <linux/irqdesc.h>
   9#include <linux/irqdomain.h>
  10#include <linux/module.h>
  11#include <linux/mutex.h>
  12#include <linux/of.h>
  13#include <linux/of_address.h>
  14#include <linux/of_irq.h>
  15#include <linux/topology.h>
  16#include <linux/seq_file.h>
  17#include <linux/slab.h>
  18#include <linux/smp.h>
  19#include <linux/fs.h>
  20
  21static LIST_HEAD(irq_domain_list);
  22static DEFINE_MUTEX(irq_domain_mutex);
  23
  24static struct irq_domain *irq_default_domain;
  25
  26static void irq_domain_check_hierarchy(struct irq_domain *domain);
  27
  28struct irqchip_fwid {
  29        struct fwnode_handle    fwnode;
  30        unsigned int            type;
  31        char                    *name;
  32        void *data;
  33};
  34
  35#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  36static void debugfs_add_domain_dir(struct irq_domain *d);
  37static void debugfs_remove_domain_dir(struct irq_domain *d);
  38#else
  39static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
  40static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
  41#endif
  42
  43const struct fwnode_operations irqchip_fwnode_ops;
  44EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  45
  46/**
  47 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  48 *                           identifying an irq domain
  49 * @type:       Type of irqchip_fwnode. See linux/irqdomain.h
  50 * @name:       Optional user provided domain name
  51 * @id:         Optional user provided id if name != NULL
  52 * @data:       Optional user-provided data
  53 *
  54 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  55 * fwnode_handle (or NULL on failure).
  56 *
  57 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  58 * solely to transport name information to irqdomain creation code. The
  59 * node is not stored. For other types the pointer is kept in the irq
  60 * domain struct.
  61 */
  62struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  63                                                const char *name, void *data)
  64{
  65        struct irqchip_fwid *fwid;
  66        char *n;
  67
  68        fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  69
  70        switch (type) {
  71        case IRQCHIP_FWNODE_NAMED:
  72                n = kasprintf(GFP_KERNEL, "%s", name);
  73                break;
  74        case IRQCHIP_FWNODE_NAMED_ID:
  75                n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  76                break;
  77        default:
  78                n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  79                break;
  80        }
  81
  82        if (!fwid || !n) {
  83                kfree(fwid);
  84                kfree(n);
  85                return NULL;
  86        }
  87
  88        fwid->type = type;
  89        fwid->name = n;
  90        fwid->data = data;
  91        fwid->fwnode.ops = &irqchip_fwnode_ops;
  92        return &fwid->fwnode;
  93}
  94EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
  95
  96/**
  97 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
  98 *
  99 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 100 */
 101void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
 102{
 103        struct irqchip_fwid *fwid;
 104
 105        if (WARN_ON(!is_fwnode_irqchip(fwnode)))
 106                return;
 107
 108        fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 109        kfree(fwid->name);
 110        kfree(fwid);
 111}
 112EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
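
/*
 * Example (editorial sketch, not part of this file): a driver for an
 * interrupt controller that has no firmware node of its own might allocate
 * a named fwnode, build a domain on top of it and release both on teardown.
 * The "demo" names and the domain size below are invented for illustration.
 */
static const struct irq_domain_ops demo_fwnode_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static struct fwnode_handle *demo_fwnode;
static struct irq_domain *demo_fwnode_domain;

static int demo_fwnode_init(void)
{
	demo_fwnode = __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0,
						"demo-intc", NULL);
	if (!demo_fwnode)
		return -ENOMEM;

	demo_fwnode_domain = irq_domain_create_linear(demo_fwnode, 32,
						      &demo_fwnode_ops, NULL);
	if (!demo_fwnode_domain) {
		irq_domain_free_fwnode(demo_fwnode);
		return -ENOMEM;
	}
	return 0;
}

static void demo_fwnode_exit(void)
{
	irq_domain_remove(demo_fwnode_domain);
	irq_domain_free_fwnode(demo_fwnode);
}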
 113
 114/**
 115 * __irq_domain_add() - Allocate a new irq_domain data structure
 116 * @fwnode: firmware node for the interrupt controller
 117 * @size: Size of linear map; 0 for radix mapping only
 118 * @hwirq_max: Maximum number of interrupts supported by controller
 119 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 120 *              direct mapping
 121 * @ops: domain callbacks
 122 * @host_data: Controller private data pointer
 123 *
 124 * Allocates and initializes an irq_domain structure.
 125 * Returns pointer to IRQ domain, or NULL on failure.
 126 */
 127struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 128                                    irq_hw_number_t hwirq_max, int direct_max,
 129                                    const struct irq_domain_ops *ops,
 130                                    void *host_data)
 131{
 132        struct device_node *of_node = to_of_node(fwnode);
 133        struct irqchip_fwid *fwid;
 134        struct irq_domain *domain;
 135
 136        static atomic_t unknown_domains;
 137
 138        domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
 139                              GFP_KERNEL, of_node_to_nid(of_node));
 140        if (WARN_ON(!domain))
 141                return NULL;
 142
 143        if (fwnode && is_fwnode_irqchip(fwnode)) {
 144                fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 145
 146                switch (fwid->type) {
 147                case IRQCHIP_FWNODE_NAMED:
 148                case IRQCHIP_FWNODE_NAMED_ID:
 149                        domain->name = kstrdup(fwid->name, GFP_KERNEL);
 150                        if (!domain->name) {
 151                                kfree(domain);
 152                                return NULL;
 153                        }
 154                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 155                        break;
 156                default:
 157                        domain->fwnode = fwnode;
 158                        domain->name = fwid->name;
 159                        break;
 160                }
 161#ifdef CONFIG_ACPI
 162        } else if (is_acpi_device_node(fwnode)) {
 163                struct acpi_buffer buf = {
 164                        .length = ACPI_ALLOCATE_BUFFER,
 165                };
 166                acpi_handle handle;
 167
 168                handle = acpi_device_handle(to_acpi_device_node(fwnode));
 169                if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
 170                        domain->name = buf.pointer;
 171                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 172                }
 173
 174                domain->fwnode = fwnode;
 175#endif
 176        } else if (of_node) {
 177                char *name;
 178
 179                /*
 180                 * DT paths contain '/', which debugfs is legitimately
 181                 * unhappy about. Replace them with ':', which does
 182                 * the trick and is not as offensive as '\'...
 183                 */
 184                name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
 185                if (!name) {
 186                        kfree(domain);
 187                        return NULL;
 188                }
 189
 190                strreplace(name, '/', ':');
 191
 192                domain->name = name;
 193                domain->fwnode = fwnode;
 194                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 195        }
 196
 197        if (!domain->name) {
 198                if (fwnode)
 199                        pr_err("Invalid fwnode type for irqdomain\n");
 200                domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
 201                                         atomic_inc_return(&unknown_domains));
 202                if (!domain->name) {
 203                        kfree(domain);
 204                        return NULL;
 205                }
 206                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 207        }
 208
 209        of_node_get(of_node);
 210
 211        /* Fill structure */
 212        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 213        mutex_init(&domain->revmap_tree_mutex);
 214        domain->ops = ops;
 215        domain->host_data = host_data;
 216        domain->hwirq_max = hwirq_max;
 217        domain->revmap_size = size;
 218        domain->revmap_direct_max_irq = direct_max;
 219        irq_domain_check_hierarchy(domain);
 220
 221        mutex_lock(&irq_domain_mutex);
 222        debugfs_add_domain_dir(domain);
 223        list_add(&domain->link, &irq_domain_list);
 224        mutex_unlock(&irq_domain_mutex);
 225
 226        pr_debug("Added domain %s\n", domain->name);
 227        return domain;
 228}
 229EXPORT_SYMBOL_GPL(__irq_domain_add);
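
/*
 * Example (editorial sketch, not part of this file): most drivers do not call
 * __irq_domain_add() directly but use one of the irq_domain_add_*() /
 * irq_domain_create_*() helpers from <linux/irqdomain.h>. A minimal DT-probed
 * controller could look like the sketch below; struct demo_priv, DEMO_NR_IRQS
 * and the mask/unmask stubs are hypothetical.
 */
#define DEMO_NR_IRQS	32			/* hypothetical */

struct demo_priv {				/* hypothetical driver state */
	void __iomem *base;
	struct irq_domain *domain;
};

static void demo_mask_irq(struct irq_data *d)	{ /* mask bit in hw register */ }
static void demo_unmask_irq(struct irq_data *d)	{ /* unmask bit in hw register */ }

static struct irq_chip demo_irq_chip = {
	.name		= "demo",
	.irq_mask	= demo_mask_irq,
	.irq_unmask	= demo_unmask_irq,
};

static int demo_map(struct irq_domain *d, unsigned int virq,
		    irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &demo_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops demo_ops = {
	.map	= demo_map,
	.xlate	= irq_domain_xlate_onecell,
};

static int demo_probe(struct device_node *np, struct demo_priv *priv)
{
	priv->domain = irq_domain_add_linear(np, DEMO_NR_IRQS, &demo_ops, priv);
	return priv->domain ? 0 : -ENOMEM;
}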
 230
 231/**
 232 * irq_domain_remove() - Remove an irq domain.
 233 * @domain: domain to remove
 234 *
 235 * This routine is used to remove an irq domain. The caller must ensure
 236 * that all mappings within the domain have been disposed of before
 237 * calling this routine.
 238 */
 239void irq_domain_remove(struct irq_domain *domain)
 240{
 241        mutex_lock(&irq_domain_mutex);
 242        debugfs_remove_domain_dir(domain);
 243
 244        WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 245
 246        list_del(&domain->link);
 247
 248        /*
 249         * If the going away domain is the default one, reset it.
 250         */
 251        if (unlikely(irq_default_domain == domain))
 252                irq_set_default_host(NULL);
 253
 254        mutex_unlock(&irq_domain_mutex);
 255
 256        pr_debug("Removed domain %s\n", domain->name);
 257
 258        of_node_put(irq_domain_get_of_node(domain));
 259        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 260                kfree(domain->name);
 261        kfree(domain);
 262}
 263EXPORT_SYMBOL_GPL(irq_domain_remove);
 264
 265void irq_domain_update_bus_token(struct irq_domain *domain,
 266                                 enum irq_domain_bus_token bus_token)
 267{
 268        char *name;
 269
 270        if (domain->bus_token == bus_token)
 271                return;
 272
 273        mutex_lock(&irq_domain_mutex);
 274
 275        domain->bus_token = bus_token;
 276
 277        name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
 278        if (!name) {
 279                mutex_unlock(&irq_domain_mutex);
 280                return;
 281        }
 282
 283        debugfs_remove_domain_dir(domain);
 284
 285        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 286                kfree(domain->name);
 287        else
 288                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 289
 290        domain->name = name;
 291        debugfs_add_domain_dir(domain);
 292
 293        mutex_unlock(&irq_domain_mutex);
 294}
 295
 296/**
 297 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 298 * @of_node: pointer to interrupt controller's device tree node.
 299 * @size: total number of irqs in mapping
 300 * @first_irq: first number of irq block assigned to the domain,
 301 *      pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 302 *      pre-map all of the irqs in the domain to virqs starting at first_irq.
 303 * @ops: domain callbacks
 304 * @host_data: Controller private data pointer
 305 *
 306 * Allocates an irq_domain and, if first_irq is positive, also allocates
 307 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 308 *
 309 * This is intended to implement the expected behaviour for most
 310 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 311 * irqs get mapped dynamically on the fly. However, if the controller requires
 312 * static virq assignments (non-DT boot) then it will set that up correctly.
 313 */
 314struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 315                                         unsigned int size,
 316                                         unsigned int first_irq,
 317                                         const struct irq_domain_ops *ops,
 318                                         void *host_data)
 319{
 320        struct irq_domain *domain;
 321
 322        domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
 323        if (!domain)
 324                return NULL;
 325
 326        if (first_irq > 0) {
 327                if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
 328                        /* attempt to allocate irq_descs */
 329                        int rc = irq_alloc_descs(first_irq, first_irq, size,
 330                                                 of_node_to_nid(of_node));
 331                        if (rc < 0)
 332                                pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 333                                        first_irq);
 334                }
 335                irq_domain_associate_many(domain, first_irq, 0, size);
 336        }
 337
 338        return domain;
 339}
 340EXPORT_SYMBOL_GPL(irq_domain_add_simple);
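
/*
 * Example (editorial sketch, not part of this file, reusing the hypothetical
 * demo_ops and demo_priv from the sketch above): a controller that may be
 * probed either from DT (dynamic virqs) or from board code (fixed virqs);
 * the size and the virq base are invented.
 */
static struct irq_domain *demo_register_simple(struct device_node *np,
					       struct demo_priv *priv)
{
	/*
	 * DT boot: np != NULL, first_irq == 0, virqs allocated on demand.
	 * Non-DT boot: np == NULL, pre-map hwirqs 0..31 onto virqs 64..95.
	 */
	unsigned int first_irq = np ? 0 : 64;

	return irq_domain_add_simple(np, 32, first_irq, &demo_ops, priv);
}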
 341
 342/**
 343 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 344 * @of_node: pointer to interrupt controller's device tree node.
 345 * @size: total number of irqs in legacy mapping
 346 * @first_irq: first number of irq block assigned to the domain
 347 * @first_hwirq: first hwirq number to use for the translation. Should normally
 348 *               be '0', but a positive integer can be used if the effective
 349 *               hwirq numbering does not begin at zero.
 350 * @ops: map/unmap domain callbacks
 351 * @host_data: Controller private data pointer
 352 *
 353 * Note: the map() callback will be called before this function returns
 354 * for all legacy interrupts except 0 (which is always the invalid irq for
 355 * a legacy controller).
 356 */
 357struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 358                                         unsigned int size,
 359                                         unsigned int first_irq,
 360                                         irq_hw_number_t first_hwirq,
 361                                         const struct irq_domain_ops *ops,
 362                                         void *host_data)
 363{
 364        struct irq_domain *domain;
 365
 366        domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
 367                                  first_hwirq + size, 0, ops, host_data);
 368        if (domain)
 369                irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 370
 371        return domain;
 372}
 373EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
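
/*
 * Example (editorial sketch, not part of this file, again with the
 * hypothetical demo_ops and demo_priv): a PIC-style controller whose 16
 * hwirqs must land on the fixed virq range 16..31.
 */
static struct irq_domain *demo_register_legacy(struct device_node *np,
					       struct demo_priv *priv)
{
	return irq_domain_add_legacy(np, 16 /* size */, 16 /* first_irq */,
				     0 /* first_hwirq */, &demo_ops, priv);
}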
 374
 375/**
 376 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 377 * @fwspec: FW specifier for an interrupt
 378 * @bus_token: bus token to match; DOMAIN_BUS_ANY matches any domain
 379 */
 380struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 381                                            enum irq_domain_bus_token bus_token)
 382{
 383        struct irq_domain *h, *found = NULL;
 384        struct fwnode_handle *fwnode = fwspec->fwnode;
 385        int rc;
 386
 387        /* We might want to match the legacy controller last since
 388         * it might potentially be set to match all interrupts in
 389         * the absence of a device node. This isn't a problem in
 390         * practice so far, though...
 391         *
 392         * bus_token == DOMAIN_BUS_ANY matches any domain, any other
 393         * values must generate an exact match for the domain to be
 394         * selected.
 395         */
 396        mutex_lock(&irq_domain_mutex);
 397        list_for_each_entry(h, &irq_domain_list, link) {
 398                if (h->ops->select && fwspec->param_count)
 399                        rc = h->ops->select(h, fwspec, bus_token);
 400                else if (h->ops->match)
 401                        rc = h->ops->match(h, to_of_node(fwnode), bus_token);
 402                else
 403                        rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
 404                              ((bus_token == DOMAIN_BUS_ANY) ||
 405                               (h->bus_token == bus_token)));
 406
 407                if (rc) {
 408                        found = h;
 409                        break;
 410                }
 411        }
 412        mutex_unlock(&irq_domain_mutex);
 413        return found;
 414}
 415EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
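
/*
 * Example (editorial sketch, not part of this file): locating the domain that
 * owns a firmware node before creating a mapping. The one-cell specifier and
 * hwirq value are illustrative.
 */
static struct irq_domain *demo_find_domain(struct fwnode_handle *fwnode)
{
	struct irq_fwspec fwspec = {
		.fwnode		= fwnode,
		.param_count	= 1,
		.param		= { 7 },	/* hwirq 7, illustrative */
	};
	struct irq_domain *d;

	d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WIRED);
	if (!d)
		d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
	return d;
}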
 416
 417/**
 418 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 419 * IRQ remapping
 420 *
 421 * Return: false if any MSI irq domain does not support IRQ remapping,
 422 * true otherwise (including if there is no MSI irq domain)
 423 */
 424bool irq_domain_check_msi_remap(void)
 425{
 426        struct irq_domain *h;
 427        bool ret = true;
 428
 429        mutex_lock(&irq_domain_mutex);
 430        list_for_each_entry(h, &irq_domain_list, link) {
 431                if (irq_domain_is_msi(h) &&
 432                    !irq_domain_hierarchical_is_msi_remap(h)) {
 433                        ret = false;
 434                        break;
 435                }
 436        }
 437        mutex_unlock(&irq_domain_mutex);
 438        return ret;
 439}
 440EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
 441
 442/**
 443 * irq_set_default_host() - Set a "default" irq domain
 444 * @domain: default domain pointer
 445 *
 446 * For convenience, it's possible to set a "default" domain that will be used
 447 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 448 * platforms that want to manipulate a few hard coded interrupt numbers that
 449 * aren't properly represented in the device-tree.
 450 */
 451void irq_set_default_host(struct irq_domain *domain)
 452{
 453        pr_debug("Default domain set to @0x%p\n", domain);
 454
 455        irq_default_domain = domain;
 456}
 457EXPORT_SYMBOL_GPL(irq_set_default_host);
 458
 459static void irq_domain_clear_mapping(struct irq_domain *domain,
 460                                     irq_hw_number_t hwirq)
 461{
 462        if (hwirq < domain->revmap_size) {
 463                domain->linear_revmap[hwirq] = 0;
 464        } else {
 465                mutex_lock(&domain->revmap_tree_mutex);
 466                radix_tree_delete(&domain->revmap_tree, hwirq);
 467                mutex_unlock(&domain->revmap_tree_mutex);
 468        }
 469}
 470
 471static void irq_domain_set_mapping(struct irq_domain *domain,
 472                                   irq_hw_number_t hwirq,
 473                                   struct irq_data *irq_data)
 474{
 475        if (hwirq < domain->revmap_size) {
 476                domain->linear_revmap[hwirq] = irq_data->irq;
 477        } else {
 478                mutex_lock(&domain->revmap_tree_mutex);
 479                radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
 480                mutex_unlock(&domain->revmap_tree_mutex);
 481        }
 482}
 483
 484void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 485{
 486        struct irq_data *irq_data = irq_get_irq_data(irq);
 487        irq_hw_number_t hwirq;
 488
 489        if (WARN(!irq_data || irq_data->domain != domain,
 490                 "virq%i doesn't exist; cannot disassociate\n", irq))
 491                return;
 492
 493        hwirq = irq_data->hwirq;
 494        irq_set_status_flags(irq, IRQ_NOREQUEST);
 495
 496        /* remove chip and handler */
 497        irq_set_chip_and_handler(irq, NULL, NULL);
 498
 499        /* Make sure it's completed */
 500        synchronize_irq(irq);
 501
 502        /* Tell the PIC about it */
 503        if (domain->ops->unmap)
 504                domain->ops->unmap(domain, irq);
 505        smp_mb();
 506
 507        irq_data->domain = NULL;
 508        irq_data->hwirq = 0;
 509        domain->mapcount--;
 510
 511        /* Clear reverse map for this hwirq */
 512        irq_domain_clear_mapping(domain, hwirq);
 513}
 514
 515int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 516                         irq_hw_number_t hwirq)
 517{
 518        struct irq_data *irq_data = irq_get_irq_data(virq);
 519        int ret;
 520
 521        if (WARN(hwirq >= domain->hwirq_max,
 522                 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
 523                return -EINVAL;
 524        if (WARN(!irq_data, "error: virq%i is not allocated", virq))
 525                return -EINVAL;
 526        if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
 527                return -EINVAL;
 528
 529        mutex_lock(&irq_domain_mutex);
 530        irq_data->hwirq = hwirq;
 531        irq_data->domain = domain;
 532        if (domain->ops->map) {
 533                ret = domain->ops->map(domain, virq, hwirq);
 534                if (ret != 0) {
 535                        /*
 536                         * If map() returns -EPERM, this interrupt is protected
 537                         * by the firmware or some other service and shall not
 538                         * be mapped. Don't bother telling the user about it.
 539                         */
 540                        if (ret != -EPERM) {
 541                                pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
 542                                       domain->name, hwirq, virq, ret);
 543                        }
 544                        irq_data->domain = NULL;
 545                        irq_data->hwirq = 0;
 546                        mutex_unlock(&irq_domain_mutex);
 547                        return ret;
 548                }
 549
 550                /* If not already assigned, give the domain the chip's name */
 551                if (!domain->name && irq_data->chip)
 552                        domain->name = irq_data->chip->name;
 553        }
 554
 555        domain->mapcount++;
 556        irq_domain_set_mapping(domain, hwirq, irq_data);
 557        mutex_unlock(&irq_domain_mutex);
 558
 559        irq_clear_status_flags(virq, IRQ_NOREQUEST);
 560
 561        return 0;
 562}
 563EXPORT_SYMBOL_GPL(irq_domain_associate);
 564
 565void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 566                               irq_hw_number_t hwirq_base, int count)
 567{
 568        struct device_node *of_node;
 569        int i;
 570
 571        of_node = irq_domain_get_of_node(domain);
 572        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
 573                of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
 574
 575        for (i = 0; i < count; i++) {
 576                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
 577        }
 578}
 579EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 580
 581/**
 582 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 583 * @domain: domain to allocate the irq for or NULL for default domain
 584 *
 585 * This routine is used for irq controllers which can choose the hardware
 586 * interrupt numbers they generate. In such a case it's simplest to use
 587 * the linux irq as the hardware interrupt number. It still uses the linear
 588 * or radix tree to store the mapping, but the irq controller can optimize
 589 * the revmap path by using the hwirq directly.
 590 */
 591unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 592{
 593        struct device_node *of_node;
 594        unsigned int virq;
 595
 596        if (domain == NULL)
 597                domain = irq_default_domain;
 598
 599        of_node = irq_domain_get_of_node(domain);
 600        virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
 601        if (!virq) {
 602                pr_debug("create_direct virq allocation failed\n");
 603                return 0;
 604        }
 605        if (virq >= domain->revmap_direct_max_irq) {
 606                pr_err("ERROR: no free irqs available below %i maximum\n",
 607                        domain->revmap_direct_max_irq);
 608                irq_free_desc(virq);
 609                return 0;
 610        }
 611        pr_debug("create_direct obtained virq %d\n", virq);
 612
 613        if (irq_domain_associate(domain, virq, virq)) {
 614                irq_free_desc(virq);
 615                return 0;
 616        }
 617
 618        return virq;
 619}
 620EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
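
/*
 * Example (editorial sketch, not part of this file): a controller that lets
 * software pick the hwirq number can take whatever virq comes back and
 * program it into the hardware. demo_priv and DEMO_EVENT_IRQ_REG are
 * hypothetical.
 */
#define DEMO_EVENT_IRQ_REG	0x08		/* hypothetical register */

static int demo_setup_event(struct demo_priv *priv)
{
	unsigned int virq = irq_create_direct_mapping(priv->domain);

	if (!virq)
		return -ENOSPC;

	/* Tell the (hypothetical) hardware which Linux irq to raise. */
	writel_relaxed(virq, priv->base + DEMO_EVENT_IRQ_REG);
	return 0;
}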
 621
 622/**
 623 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 624 * @domain: domain owning this hardware interrupt or NULL for default domain
 625 * @hwirq: hardware irq number in that domain space
 626 *
 627 * Only one mapping per hardware interrupt is permitted. Returns a linux
 628 * irq number.
 629 * If the sense/trigger is to be specified, irq_set_irq_type() should be called
 630 * on the number returned from that call.
 631 */
 632unsigned int irq_create_mapping(struct irq_domain *domain,
 633                                irq_hw_number_t hwirq)
 634{
 635        struct device_node *of_node;
 636        int virq;
 637
 638        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 639
 640        /* Look for default domain if necessary */
 641        if (domain == NULL)
 642                domain = irq_default_domain;
 643        if (domain == NULL) {
 644                WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
 645                return 0;
 646        }
 647        pr_debug("-> using domain @%p\n", domain);
 648
 649        of_node = irq_domain_get_of_node(domain);
 650
 651        /* Check if mapping already exists */
 652        virq = irq_find_mapping(domain, hwirq);
 653        if (virq) {
 654                pr_debug("-> existing mapping on virq %d\n", virq);
 655                return virq;
 656        }
 657
 658        /* Allocate a virtual interrupt number */
 659        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
 660        if (virq <= 0) {
 661                pr_debug("-> virq allocation failed\n");
 662                return 0;
 663        }
 664
 665        if (irq_domain_associate(domain, virq, hwirq)) {
 666                irq_free_desc(virq);
 667                return 0;
 668        }
 669
 670        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
 671                hwirq, of_node_full_name(of_node), virq);
 672
 673        return virq;
 674}
 675EXPORT_SYMBOL_GPL(irq_create_mapping);
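
/*
 * Example (editorial sketch, not part of this file): a consumer that knows a
 * hwirq number within a domain maps it, requests it, and disposes of the
 * mapping if the request fails. demo_priv and the handler are hypothetical.
 */
static irqreturn_t demo_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_request_hwirq(struct demo_priv *priv, irq_hw_number_t hwirq)
{
	unsigned int virq;
	int ret;

	virq = irq_create_mapping(priv->domain, hwirq);
	if (!virq)
		return -EINVAL;

	ret = request_irq(virq, demo_handler, 0, "demo-dev", priv);
	if (ret)
		irq_dispose_mapping(virq);
	return ret;
}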
 676
 677/**
 678 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 679 * @domain: domain owning the interrupt range
 680 * @irq_base: beginning of linux IRQ range
 681 * @hwirq_base: beginning of hardware IRQ range
 682 * @count: Number of interrupts to map
 683 *
 684 * This routine is used for allocating and mapping a range of hardware
 685 * irqs to linux irqs where the linux irq numbers are at pre-defined
 686 * locations. For use by controllers that already have static mappings
 687 * to insert in to the domain.
 688 *
 689 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 690 * domain insertion.
 691 *
 692 * 0 is returned upon success, while any failure to establish a static
 693 * mapping is treated as an error.
 694 */
 695int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 696                               irq_hw_number_t hwirq_base, int count)
 697{
 698        struct device_node *of_node;
 699        int ret;
 700
 701        of_node = irq_domain_get_of_node(domain);
 702        ret = irq_alloc_descs(irq_base, irq_base, count,
 703                              of_node_to_nid(of_node));
 704        if (unlikely(ret < 0))
 705                return ret;
 706
 707        irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 708        return 0;
 709}
 710EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
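
/*
 * Example (editorial sketch, not part of this file): a controller with a
 * firmware-dictated 1:1 block of virqs, say hwirqs 0..15 landing on virqs
 * 32..47 (both ranges illustrative), can insert them in one go:
 */
static int demo_insert_static_block(struct irq_domain *domain)
{
	return irq_create_strict_mappings(domain, 32, 0, 16);
}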
 711
 712static int irq_domain_translate(struct irq_domain *d,
 713                                struct irq_fwspec *fwspec,
 714                                irq_hw_number_t *hwirq, unsigned int *type)
 715{
 716#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 717        if (d->ops->translate)
 718                return d->ops->translate(d, fwspec, hwirq, type);
 719#endif
 720        if (d->ops->xlate)
 721                return d->ops->xlate(d, to_of_node(fwspec->fwnode),
 722                                     fwspec->param, fwspec->param_count,
 723                                     hwirq, type);
 724
 725        /* If domain has no translation, then we assume interrupt line */
 726        *hwirq = fwspec->param[0];
 727        return 0;
 728}
 729
 730static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 731                                      struct irq_fwspec *fwspec)
 732{
 733        int i;
 734
 735        fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
 736        fwspec->param_count = irq_data->args_count;
 737
 738        for (i = 0; i < irq_data->args_count; i++)
 739                fwspec->param[i] = irq_data->args[i];
 740}
 741
 742unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 743{
 744        struct irq_domain *domain;
 745        struct irq_data *irq_data;
 746        irq_hw_number_t hwirq;
 747        unsigned int type = IRQ_TYPE_NONE;
 748        int virq;
 749
 750        if (fwspec->fwnode) {
 751                domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
 752                if (!domain)
 753                        domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
 754        } else {
 755                domain = irq_default_domain;
 756        }
 757
 758        if (!domain) {
 759                pr_warn("no irq domain found for %s !\n",
 760                        of_node_full_name(to_of_node(fwspec->fwnode)));
 761                return 0;
 762        }
 763
 764        if (irq_domain_translate(domain, fwspec, &hwirq, &type))
 765                return 0;
 766
 767        /*
 768         * WARN if the irqchip returns a type with bits
 769         * outside the sense mask set and clear these bits.
 770         */
 771        if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
 772                type &= IRQ_TYPE_SENSE_MASK;
 773
 774        /*
 775         * If we've already configured this interrupt,
 776         * don't do it again, or hell will break loose.
 777         */
 778        virq = irq_find_mapping(domain, hwirq);
 779        if (virq) {
 780                /*
 781                 * If the trigger type is not specified or matches the
 782                 * current trigger type then we are done so return the
 783                 * interrupt number.
 784                 */
 785                if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
 786                        return virq;
 787
 788                /*
 789                 * If the trigger type has not been set yet, then set
 790                 * it now and return the interrupt number.
 791                 */
 792                if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
 793                        irq_data = irq_get_irq_data(virq);
 794                        if (!irq_data)
 795                                return 0;
 796
 797                        irqd_set_trigger_type(irq_data, type);
 798                        return virq;
 799                }
 800
 801                pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
 802                        hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
 803                return 0;
 804        }
 805
 806        if (irq_domain_is_hierarchy(domain)) {
 807                virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 808                if (virq <= 0)
 809                        return 0;
 810        } else {
 811                /* Create mapping */
 812                virq = irq_create_mapping(domain, hwirq);
 813                if (!virq)
 814                        return virq;
 815        }
 816
 817        irq_data = irq_get_irq_data(virq);
 818        if (!irq_data) {
 819                if (irq_domain_is_hierarchy(domain))
 820                        irq_domain_free_irqs(virq, 1);
 821                else
 822                        irq_dispose_mapping(virq);
 823                return 0;
 824        }
 825
 826        /* Store trigger type */
 827        irqd_set_trigger_type(irq_data, type);
 828
 829        return virq;
 830}
 831EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
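
/*
 * Example (editorial sketch, not part of this file): code that is handed a
 * firmware node and raw specifier cells (rather than a struct
 * of_phandle_args) can build the fwspec itself. The two-cell layout and the
 * values are illustrative.
 */
static unsigned int demo_map_wired_irq(struct fwnode_handle *fwnode)
{
	struct irq_fwspec fwspec = {
		.fwnode		= fwnode,
		.param_count	= 2,
		.param		= { 23, IRQ_TYPE_LEVEL_HIGH },
	};

	return irq_create_fwspec_mapping(&fwspec);
}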
 832
 833unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
 834{
 835        struct irq_fwspec fwspec;
 836
 837        of_phandle_args_to_fwspec(irq_data, &fwspec);
 838        return irq_create_fwspec_mapping(&fwspec);
 839}
 840EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 841
 842/**
 843 * irq_dispose_mapping() - Unmap an interrupt
 844 * @virq: linux irq number of the interrupt to unmap
 845 */
 846void irq_dispose_mapping(unsigned int virq)
 847{
 848        struct irq_data *irq_data = irq_get_irq_data(virq);
 849        struct irq_domain *domain;
 850
 851        if (!virq || !irq_data)
 852                return;
 853
 854        domain = irq_data->domain;
 855        if (WARN_ON(domain == NULL))
 856                return;
 857
 858        if (irq_domain_is_hierarchy(domain)) {
 859                irq_domain_free_irqs(virq, 1);
 860        } else {
 861                irq_domain_disassociate(domain, virq);
 862                irq_free_desc(virq);
 863        }
 864}
 865EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 866
 867/**
 868 * irq_find_mapping() - Find a linux irq from an hw irq number.
 869 * @domain: domain owning this hardware interrupt
 870 * @hwirq: hardware irq number in that domain space
 871 */
 872unsigned int irq_find_mapping(struct irq_domain *domain,
 873                              irq_hw_number_t hwirq)
 874{
 875        struct irq_data *data;
 876
 877        /* Look for default domain if necessary */
 878        if (domain == NULL)
 879                domain = irq_default_domain;
 880        if (domain == NULL)
 881                return 0;
 882
 883        if (hwirq < domain->revmap_direct_max_irq) {
 884                data = irq_domain_get_irq_data(domain, hwirq);
 885                if (data && data->hwirq == hwirq)
 886                        return hwirq;
 887        }
 888
 889        /* Check if the hwirq is in the linear revmap. */
 890        if (hwirq < domain->revmap_size)
 891                return domain->linear_revmap[hwirq];
 892
 893        rcu_read_lock();
 894        data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 895        rcu_read_unlock();
 896        return data ? data->irq : 0;
 897}
 898EXPORT_SYMBOL_GPL(irq_find_mapping);
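
/*
 * Example (editorial sketch, not part of this file): the classic consumer of
 * irq_find_mapping() is a chained flow handler that reads a pending bitmap
 * from hardware and demultiplexes it into Linux irqs. It reuses the
 * hypothetical demo_priv from the earlier sketch; DEMO_PENDING_REG is made
 * up, and <linux/irqchip/chained_irq.h> provides chained_irq_enter/exit().
 */
#define DEMO_PENDING_REG	0x04		/* hypothetical register */

static void demo_demux_handler(struct irq_desc *desc)
{
	struct demo_priv *priv = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long status;
	int hwirq;

	chained_irq_enter(chip, desc);

	status = readl_relaxed(priv->base + DEMO_PENDING_REG);
	for_each_set_bit(hwirq, &status, 32)
		generic_handle_irq(irq_find_mapping(priv->domain, hwirq));

	chained_irq_exit(chip, desc);
}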
 899
 900/**
 901 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 902 *
 903 * Device Tree IRQ specifier translation function which works with one cell
 904 * bindings where the cell value maps directly to the hwirq number.
 905 */
 906int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
 907                             const u32 *intspec, unsigned int intsize,
 908                             unsigned long *out_hwirq, unsigned int *out_type)
 909{
 910        if (WARN_ON(intsize < 1))
 911                return -EINVAL;
 912        *out_hwirq = intspec[0];
 913        *out_type = IRQ_TYPE_NONE;
 914        return 0;
 915}
 916EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
 917
 918/**
 919 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 920 *
 921 * Device Tree IRQ specifier translation function which works with two cell
 922 * bindings where the cell values map directly to the hwirq number
 923 * and linux irq flags.
 924 */
 925int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
 926                        const u32 *intspec, unsigned int intsize,
 927                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
 928{
 929        if (WARN_ON(intsize < 2))
 930                return -EINVAL;
 931        *out_hwirq = intspec[0];
 932        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
 933        return 0;
 934}
 935EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
 936
 937/**
 938 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 939 *
 940 * Device Tree IRQ specifier translation function which works with either one
 941 * or two cell bindings where the cell values map directly to the hwirq number
 942 * and linux irq flags.
 943 *
 944 * Note: don't use this function unless your interrupt controller explicitly
 945 * supports both one and two cell bindings.  For the majority of controllers
 946 * the _onecell() or _twocell() variants above should be used.
 947 */
 948int irq_domain_xlate_onetwocell(struct irq_domain *d,
 949                                struct device_node *ctrlr,
 950                                const u32 *intspec, unsigned int intsize,
 951                                unsigned long *out_hwirq, unsigned int *out_type)
 952{
 953        if (WARN_ON(intsize < 1))
 954                return -EINVAL;
 955        *out_hwirq = intspec[0];
 956        if (intsize > 1)
 957                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
 958        else
 959                *out_type = IRQ_TYPE_NONE;
 960        return 0;
 961}
 962EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
 963
 964const struct irq_domain_ops irq_domain_simple_ops = {
 965        .xlate = irq_domain_xlate_onetwocell,
 966};
 967EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 968
 969int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
 970                           int node, const struct cpumask *affinity)
 971{
 972        unsigned int hint;
 973
 974        if (virq >= 0) {
 975                virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
 976                                         affinity);
 977        } else {
 978                hint = hwirq % nr_irqs;
 979                if (hint == 0)
 980                        hint++;
 981                virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
 982                                         affinity);
 983                if (virq <= 0 && hint > 1) {
 984                        virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
 985                                                 affinity);
 986                }
 987        }
 988
 989        return virq;
 990}
 991
 992#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
 993/**
 994 * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
 995 * @parent:     Parent irq domain to associate with the new domain
 996 * @flags:      Irq domain flags associated to the domain
 997 * @size:       Size of the domain. See below
 998 * @fwnode:     Optional fwnode of the interrupt controller
 999 * @ops:        Pointer to the interrupt domain callbacks
1000 * @host_data:  Controller private data pointer
1001 *
1002 * If @size is 0 a tree domain is created, otherwise a linear domain.
1003 *
1004 * If successful the parent is associated to the new domain and the
1005 * domain flags are set.
1006 * Returns pointer to IRQ domain, or NULL on failure.
1007 */
1008struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
1009                                            unsigned int flags,
1010                                            unsigned int size,
1011                                            struct fwnode_handle *fwnode,
1012                                            const struct irq_domain_ops *ops,
1013                                            void *host_data)
1014{
1015        struct irq_domain *domain;
1016
1017        if (size)
1018                domain = irq_domain_create_linear(fwnode, size, ops, host_data);
1019        else
1020                domain = irq_domain_create_tree(fwnode, ops, host_data);
1021        if (domain) {
1022                domain->parent = parent;
1023                domain->flags |= flags;
1024        }
1025
1026        return domain;
1027}
1028EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
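
/*
 * Example (editorial sketch, not part of this file): a stacked domain (e.g.
 * something MSI-like sitting on top of a parent interrupt controller)
 * typically allocates from its parent in .alloc and then installs its own
 * chip for every irq. Everything named demo_* is hypothetical, and passing
 * @arg straight to the parent assumes both domains share the same argument
 * convention.
 */
static struct irq_chip demo_hier_chip = {
	.name	= "demo-hier",
};

static irq_hw_number_t demo_pick_hwirq(void *host_data)
{
	return 0;	/* hypothetical: pick a free hardware slot */
}

static int demo_hier_alloc(struct irq_domain *domain, unsigned int virq,
			   unsigned int nr_irqs, void *arg)
{
	irq_hw_number_t hwirq = demo_pick_hwirq(domain->host_data);
	int i, ret;

	/* Let the parent domain allocate its resources first. */
	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &demo_hier_chip,
					      domain->host_data);
	return 0;
}

static const struct irq_domain_ops demo_hier_ops = {
	.alloc	= demo_hier_alloc,
	.free	= irq_domain_free_irqs_common,
};

static struct irq_domain *demo_create_stacked(struct irq_domain *parent,
					      struct fwnode_handle *fwnode,
					      void *priv)
{
	return irq_domain_create_hierarchy(parent, 0, 32, fwnode,
					   &demo_hier_ops, priv);
}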
1029
1030static void irq_domain_insert_irq(int virq)
1031{
1032        struct irq_data *data;
1033
1034        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1035                struct irq_domain *domain = data->domain;
1036
1037                domain->mapcount++;
1038                irq_domain_set_mapping(domain, data->hwirq, data);
1039
1040                /* If not already assigned, give the domain the chip's name */
1041                if (!domain->name && data->chip)
1042                        domain->name = data->chip->name;
1043        }
1044
1045        irq_clear_status_flags(virq, IRQ_NOREQUEST);
1046}
1047
1048static void irq_domain_remove_irq(int virq)
1049{
1050        struct irq_data *data;
1051
1052        irq_set_status_flags(virq, IRQ_NOREQUEST);
1053        irq_set_chip_and_handler(virq, NULL, NULL);
1054        synchronize_irq(virq);
1055        smp_mb();
1056
1057        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1058                struct irq_domain *domain = data->domain;
1059                irq_hw_number_t hwirq = data->hwirq;
1060
1061                domain->mapcount--;
1062                irq_domain_clear_mapping(domain, hwirq);
1063        }
1064}
1065
1066static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
1067                                                   struct irq_data *child)
1068{
1069        struct irq_data *irq_data;
1070
1071        irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
1072                                irq_data_get_node(child));
1073        if (irq_data) {
1074                child->parent_data = irq_data;
1075                irq_data->irq = child->irq;
1076                irq_data->common = child->common;
1077                irq_data->domain = domain;
1078        }
1079
1080        return irq_data;
1081}
1082
1083static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
1084{
1085        struct irq_data *irq_data, *tmp;
1086        int i;
1087
1088        for (i = 0; i < nr_irqs; i++) {
1089                irq_data = irq_get_irq_data(virq + i);
1090                tmp = irq_data->parent_data;
1091                irq_data->parent_data = NULL;
1092                irq_data->domain = NULL;
1093
1094                while (tmp) {
1095                        irq_data = tmp;
1096                        tmp = tmp->parent_data;
1097                        kfree(irq_data);
1098                }
1099        }
1100}
1101
1102static int irq_domain_alloc_irq_data(struct irq_domain *domain,
1103                                     unsigned int virq, unsigned int nr_irqs)
1104{
1105        struct irq_data *irq_data;
1106        struct irq_domain *parent;
1107        int i;
1108
1109        /* The outermost irq_data is embedded in struct irq_desc */
1110        for (i = 0; i < nr_irqs; i++) {
1111                irq_data = irq_get_irq_data(virq + i);
1112                irq_data->domain = domain;
1113
1114                for (parent = domain->parent; parent; parent = parent->parent) {
1115                        irq_data = irq_domain_insert_irq_data(parent, irq_data);
1116                        if (!irq_data) {
1117                                irq_domain_free_irq_data(virq, i + 1);
1118                                return -ENOMEM;
1119                        }
1120                }
1121        }
1122
1123        return 0;
1124}
1125
1126/**
1127 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1128 * @domain:     domain to match
1129 * @virq:       IRQ number to get irq_data
1130 */
1131struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1132                                         unsigned int virq)
1133{
1134        struct irq_data *irq_data;
1135
1136        for (irq_data = irq_get_irq_data(virq); irq_data;
1137             irq_data = irq_data->parent_data)
1138                if (irq_data->domain == domain)
1139                        return irq_data;
1140
1141        return NULL;
1142}
1143EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1144
1145/**
1146 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1147 * @domain:     Interrupt domain to match
1148 * @virq:       IRQ number
1149 * @hwirq:      The hwirq number
1150 * @chip:       The associated interrupt chip
1151 * @chip_data:  The associated chip data
1152 */
1153int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1154                                  irq_hw_number_t hwirq, struct irq_chip *chip,
1155                                  void *chip_data)
1156{
1157        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1158
1159        if (!irq_data)
1160                return -ENOENT;
1161
1162        irq_data->hwirq = hwirq;
1163        irq_data->chip = chip ? chip : &no_irq_chip;
1164        irq_data->chip_data = chip_data;
1165
1166        return 0;
1167}
1168EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
1169
1170/**
1171 * irq_domain_set_info - Set the complete data for a @virq in @domain
1172 * @domain:             Interrupt domain to match
1173 * @virq:               IRQ number
1174 * @hwirq:              The hardware interrupt number
1175 * @chip:               The associated interrupt chip
1176 * @chip_data:          The associated interrupt chip data
1177 * @handler:            The interrupt flow handler
1178 * @handler_data:       The interrupt flow handler data
1179 * @handler_name:       The interrupt handler name
1180 */
1181void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1182                         irq_hw_number_t hwirq, struct irq_chip *chip,
1183                         void *chip_data, irq_flow_handler_t handler,
1184                         void *handler_data, const char *handler_name)
1185{
1186        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1187        __irq_set_handler(virq, handler, 0, handler_name);
1188        irq_set_handler_data(virq, handler_data);
1189}
1190EXPORT_SYMBOL(irq_domain_set_info);
1191
1192/**
1193 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
1194 * @irq_data:   The pointer to irq_data
1195 */
1196void irq_domain_reset_irq_data(struct irq_data *irq_data)
1197{
1198        irq_data->hwirq = 0;
1199        irq_data->chip = &no_irq_chip;
1200        irq_data->chip_data = NULL;
1201}
1202EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
1203
1204/**
1205 * irq_domain_free_irqs_common - Clear irq_data and free the parent
1206 * @domain:     Interrupt domain to match
1207 * @virq:       IRQ number to start with
1208 * @nr_irqs:    The number of irqs to free
1209 */
1210void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1211                                 unsigned int nr_irqs)
1212{
1213        struct irq_data *irq_data;
1214        int i;
1215
1216        for (i = 0; i < nr_irqs; i++) {
1217                irq_data = irq_domain_get_irq_data(domain, virq + i);
1218                if (irq_data)
1219                        irq_domain_reset_irq_data(irq_data);
1220        }
1221        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1222}
1223EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
1224
1225/**
1226 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
1227 * @domain:     Interrupt domain to match
1228 * @virq:       IRQ number to start with
1229 * @nr_irqs:    The number of irqs to free
1230 */
1231void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1232                              unsigned int nr_irqs)
1233{
1234        int i;
1235
1236        for (i = 0; i < nr_irqs; i++) {
1237                irq_set_handler_data(virq + i, NULL);
1238                irq_set_handler(virq + i, NULL);
1239        }
1240        irq_domain_free_irqs_common(domain, virq, nr_irqs);
1241}
1242
1243static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
1244                                           unsigned int irq_base,
1245                                           unsigned int nr_irqs)
1246{
1247        if (domain->ops->free)
1248                domain->ops->free(domain, irq_base, nr_irqs);
1249}
1250
1251int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
1252                                    unsigned int irq_base,
1253                                    unsigned int nr_irqs, void *arg)
1254{
1255        return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
1256}
1257
1258/**
1259 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1260 * @domain:     domain to allocate from
1261 * @irq_base:   allocate specified IRQ number if irq_base >= 0
1262 * @nr_irqs:    number of IRQs to allocate
1263 * @node:       NUMA node id for memory allocation
1264 * @arg:        domain specific argument
1265 * @realloc:    IRQ descriptors have already been allocated if true
1266 * @affinity:   Optional irq affinity mask for multiqueue devices
1267 *
1268 * Allocate IRQ numbers and initialize all data structures needed to support
1269 * hierarchical IRQ domains.
1270 * Parameter @realloc is mainly to support legacy IRQs.
1271 * Returns error code or allocated IRQ number.
1272 *
1273 * The whole process of setting up an IRQ has been split into two steps.
1274 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1275 * descriptors and required hardware resources. The second step,
1276 * irq_domain_activate_irq(), is to program the hardware with the
1277 * preallocated resources. In this way, it's easier to roll back when
1278 * failing to allocate resources.
1279 */
1280int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1281                            unsigned int nr_irqs, int node, void *arg,
1282                            bool realloc, const struct cpumask *affinity)
1283{
1284        int i, ret, virq;
1285
1286        if (domain == NULL) {
1287                domain = irq_default_domain;
1288                if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1289                        return -EINVAL;
1290        }
1291
1292        if (!domain->ops->alloc) {
1293                pr_debug("domain->ops->alloc() is NULL\n");
1294                return -ENOSYS;
1295        }
1296
1297        if (realloc && irq_base >= 0) {
1298                virq = irq_base;
1299        } else {
1300                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1301                                              affinity);
1302                if (virq < 0) {
1303                        pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1304                                 irq_base, nr_irqs);
1305                        return virq;
1306                }
1307        }
1308
1309        if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1310                pr_debug("cannot allocate memory for IRQ%d\n", virq);
1311                ret = -ENOMEM;
1312                goto out_free_desc;
1313        }
1314
1315        mutex_lock(&irq_domain_mutex);
1316        ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
1317        if (ret < 0) {
1318                mutex_unlock(&irq_domain_mutex);
1319                goto out_free_irq_data;
1320        }
1321        for (i = 0; i < nr_irqs; i++)
1322                irq_domain_insert_irq(virq + i);
1323        mutex_unlock(&irq_domain_mutex);
1324
1325        return virq;
1326
1327out_free_irq_data:
1328        irq_domain_free_irq_data(virq, nr_irqs);
1329out_free_desc:
1330        irq_free_descs(virq, nr_irqs);
1331        return ret;
1332}
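
/*
 * Example (editorial sketch, not part of this file): most callers reach this
 * through the irq_domain_alloc_irqs() wrapper in <linux/irqdomain.h>, which
 * fills in the less commonly used parameters; the direct call below allocates
 * two interrupts and later frees them again. The fwspec argument is just one
 * possible @arg convention.
 */
static int demo_alloc_two(struct irq_domain *domain, struct irq_fwspec *fwspec)
{
	int virq;

	virq = __irq_domain_alloc_irqs(domain, -1, 2, NUMA_NO_NODE, fwspec,
				       false, NULL);
	if (virq < 0)
		return virq;

	/* ... use the two interrupts, then tear them down again ... */
	irq_domain_free_irqs(virq, 2);
	return 0;
}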
1333
1334/* The irq_data was moved, fix the revmap to refer to the new location */
1335static void irq_domain_fix_revmap(struct irq_data *d)
1336{
1337        void __rcu **slot;
1338
1339        if (d->hwirq < d->domain->revmap_size)
1340                return; /* Not using radix tree. */
1341
1342        /* Fix up the revmap. */
1343        mutex_lock(&d->domain->revmap_tree_mutex);
1344        slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1345        if (slot)
1346                radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
1347        mutex_unlock(&d->domain->revmap_tree_mutex);
1348}
1349
1350/**
1351 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
1352 * @domain:     Domain to push.
1353 * @virq:       Irq to push the domain in to.
1354 * @arg:        Passed to the irq_domain_ops alloc() function.
1355 *
1356 * For an already existing irqdomain hierarchy, as might be obtained
1357 * via a call to pci_enable_msix(), add an additional domain to the
1358 * head of the processing chain.  Must be called before request_irq()
1359 * has been called.
1360 */
1361int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1362{
1363        struct irq_data *child_irq_data;
1364        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1365        struct irq_desc *desc;
1366        int rv = 0;
1367
1368        /*
1369         * Check that no action has been set, which indicates the virq
1370         * is in a state where this function doesn't have to deal with
1371         * races between interrupt handling and maintaining the
1372         * hierarchy.  This will catch gross misuse.  Attempting to
1373         * make the check race free would require holding locks across
1374         * calls to struct irq_domain_ops->alloc(), which could lead
1375         * to deadlock, so we just do a simple check before starting.
1376         */
1377        desc = irq_to_desc(virq);
1378        if (!desc)
1379                return -EINVAL;
1380        if (WARN_ON(desc->action))
1381                return -EBUSY;
1382
1383        if (domain == NULL)
1384                return -EINVAL;
1385
1386        if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1387                return -EINVAL;
1388
1389        if (!root_irq_data)
1390                return -EINVAL;
1391
1392        if (domain->parent != root_irq_data->domain)
1393                return -EINVAL;
1394
1395        child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1396                                      irq_data_get_node(root_irq_data));
1397        if (!child_irq_data)
1398                return -ENOMEM;
1399
1400        mutex_lock(&irq_domain_mutex);
1401
1402        /* Copy the original irq_data. */
1403        *child_irq_data = *root_irq_data;
1404
1405        /*
1406         * Overwrite the root_irq_data, which is embedded in struct
1407         * irq_desc, with values for this domain.
1408         */
1409        root_irq_data->parent_data = child_irq_data;
1410        root_irq_data->domain = domain;
1411        root_irq_data->mask = 0;
1412        root_irq_data->hwirq = 0;
1413        root_irq_data->chip = NULL;
1414        root_irq_data->chip_data = NULL;
1415
1416        /* May (probably does) set hwirq, chip, etc. */
1417        rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1418        if (rv) {
1419                /* Restore the original irq_data. */
1420                *root_irq_data = *child_irq_data;
1421                goto error;
1422        }
1423
1424        irq_domain_fix_revmap(child_irq_data);
1425        irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1426
1427error:
1428        mutex_unlock(&irq_domain_mutex);
1429
1430        return rv;
1431}
1432EXPORT_SYMBOL_GPL(irq_domain_push_irq);
1433
1434/**
1435 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1436 * @domain:     Domain to remove.
1437 * @virq:       Irq to remove the domain from.
1438 *
1439 * Undo the effects of a call to irq_domain_push_irq().  Must be
1440 * called either before request_irq() or after free_irq().
1441 */
1442int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1443{
1444        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1445        struct irq_data *child_irq_data;
1446        struct irq_data *tmp_irq_data;
1447        struct irq_desc *desc;
1448
1449        /*
1450         * Check that no action is set, which indicates the virq is in
1451         * a state where this function doesn't have to deal with races
1452         * between interrupt handling and maintaining the hierarchy.
1453         * This will catch gross misuse.  Attempting to make the check
1454         * race free would require holding locks across calls to
1455         * struct irq_domain_ops->free(), which could lead to
1456         * deadlock, so we just do a simple check before starting.
1457         */
1458        desc = irq_to_desc(virq);
1459        if (!desc)
1460                return -EINVAL;
1461        if (WARN_ON(desc->action))
1462                return -EBUSY;
1463
1464        if (domain == NULL)
1465                return -EINVAL;
1466
1467        if (!root_irq_data)
1468                return -EINVAL;
1469
1470        tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1471
1472        /* We can only "pop" if this domain is at the top of the list */
1473        if (WARN_ON(root_irq_data != tmp_irq_data))
1474                return -EINVAL;
1475
1476        if (WARN_ON(root_irq_data->domain != domain))
1477                return -EINVAL;
1478
1479        child_irq_data = root_irq_data->parent_data;
1480        if (WARN_ON(!child_irq_data))
1481                return -EINVAL;
1482
1483        mutex_lock(&irq_domain_mutex);
1484
1485        root_irq_data->parent_data = NULL;
1486
1487        irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1488        irq_domain_free_irqs_hierarchy(domain, virq, 1);
1489
1490        /* Restore the original irq_data. */
1491        *root_irq_data = *child_irq_data;
1492
1493        irq_domain_fix_revmap(root_irq_data);
1494
1495        mutex_unlock(&irq_domain_mutex);
1496
1497        kfree(child_irq_data);
1498
1499        return 0;
1500}
1501EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
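
/*
 * Illustrative, hedged usage sketch (not taken from a real driver): a
 * consumer that stacks its own domain on top of an interrupt the parent
 * domain has already mapped.  "example_domain", "example_arg",
 * "example_handler" and "example_dev" are hypothetical names.
 */
static int example_stack_and_request(struct irq_domain *example_domain,
                                     int virq, void *example_arg,
                                     irq_handler_t example_handler,
                                     void *example_dev)
{
        int ret;

        /* Must run before any handler is installed on virq. */
        ret = irq_domain_push_irq(example_domain, virq, example_arg);
        if (ret)
                return ret;

        ret = request_irq(virq, example_handler, 0, "example", example_dev);
        if (ret)
                irq_domain_pop_irq(example_domain, virq);       /* roll back */

        return ret;
}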
1502
1503/**
1504 * irq_domain_free_irqs - Free IRQ number and associated data structures
1505 * @virq:       base IRQ number
1506 * @nr_irqs:    number of IRQs to free
1507 */
1508void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1509{
1510        struct irq_data *data = irq_get_irq_data(virq);
1511        int i;
1512
1513        if (WARN(!data || !data->domain || !data->domain->ops->free,
1514                 "NULL pointer, cannot free irq\n"))
1515                return;
1516
1517        mutex_lock(&irq_domain_mutex);
1518        for (i = 0; i < nr_irqs; i++)
1519                irq_domain_remove_irq(virq + i);
1520        irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
1521        mutex_unlock(&irq_domain_mutex);
1522
1523        irq_domain_free_irq_data(virq, nr_irqs);
1524        irq_free_descs(virq, nr_irqs);
1525}
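
/*
 * Hedged pairing sketch: the virqs released above are normally the ones
 * handed out by irq_domain_alloc_irqs().  "example_domain" and the use of
 * a struct irq_fwspec as the allocation argument are assumptions; the arg
 * type is whatever the domain's ->alloc() expects.
 */
static int example_alloc_two_irqs(struct irq_domain *example_domain,
                                  struct irq_fwspec *fwspec)
{
        int virq;

        virq = irq_domain_alloc_irqs(example_domain, 2, NUMA_NO_NODE, fwspec);
        if (virq < 0)
                return virq;

        /* ... use virq and virq + 1 ... */

        irq_domain_free_irqs(virq, 2);
        return 0;
}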
1526
1527/**
1528 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1529 * @irq_base:   Base IRQ number
1530 * @nr_irqs:    Number of IRQs to allocate
1531 * @arg:        Allocation data (arch/domain specific)
1532 *
1533 * Check whether the domain has been set up recursively. If not, allocate
1534 * through the parent domain.
1535 */
1536int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1537                                 unsigned int irq_base, unsigned int nr_irqs,
1538                                 void *arg)
1539{
1540        if (!domain->parent)
1541                return -ENOSYS;
1542
1543        return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1544                                               nr_irqs, arg);
1545}
1546EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
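
/*
 * Hedged sketch of the typical caller: a hierarchical domain's ->alloc()
 * programs its own level and then forwards the request to the parent.
 * "example_child_chip" and the assumption that @arg carries the hwirq
 * directly are illustrative only.
 */
static struct irq_chip example_child_chip;

static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        irq_hw_number_t hwirq = *(irq_hw_number_t *)arg;        /* assumed layout */
        unsigned int i;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &example_child_chip, NULL);

        /* Let the parent domain allocate its share of the hierarchy. */
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
}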
1547
1548/**
1549 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1550 * @irq_base:   Base IRQ number
1551 * @nr_irqs:    Number of IRQs to free
1552 *
1553 * Check whether the domain has been set up recursively. If not, free
1554 * through the parent domain.
1555 */
1556void irq_domain_free_irqs_parent(struct irq_domain *domain,
1557                                 unsigned int irq_base, unsigned int nr_irqs)
1558{
1559        if (!domain->parent)
1560                return;
1561
1562        irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
1563}
1564EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
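
/*
 * Hedged counterpart for ->free(): dispose of any per-level state first,
 * then hand the range back to the parent domain (illustrative only).
 */
static void example_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        /* ... undo example_domain_alloc()'s per-irq setup here ... */

        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}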
1565
1566static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1567{
1568        if (irq_data && irq_data->domain) {
1569                struct irq_domain *domain = irq_data->domain;
1570
1571                if (domain->ops->deactivate)
1572                        domain->ops->deactivate(domain, irq_data);
1573                if (irq_data->parent_data)
1574                        __irq_domain_deactivate_irq(irq_data->parent_data);
1575        }
1576}
1577
1578static int __irq_domain_activate_irq(struct irq_data *irqd, bool reserve)
1579{
1580        int ret = 0;
1581
1582        if (irqd && irqd->domain) {
1583                struct irq_domain *domain = irqd->domain;
1584
1585                if (irqd->parent_data)
1586                        ret = __irq_domain_activate_irq(irqd->parent_data,
1587                                                        reserve);
1588                if (!ret && domain->ops->activate) {
1589                        ret = domain->ops->activate(domain, irqd, reserve);
1590                        /* Rollback in case of error */
1591                        if (ret && irqd->parent_data)
1592                                __irq_domain_deactivate_irq(irqd->parent_data);
1593                }
1594        }
1595        return ret;
1596}
1597
1598/**
1599 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1600 *                           interrupt
1601 * @irq_data:   Outermost irq_data associated with interrupt
1602 * @reserve:    If set only reserve an interrupt vector instead of assigning one
1603 *
1604 * This is the second activation step: call domain_ops->activate to program the
1605 * interrupt controllers so that the interrupt can actually be delivered.
1606 */
1607int irq_domain_activate_irq(struct irq_data *irq_data, bool reserve)
1608{
1609        int ret = 0;
1610
1611        if (!irqd_is_activated(irq_data))
1612                ret = __irq_domain_activate_irq(irq_data, reserve);
1613        if (!ret)
1614                irqd_set_activated(irq_data);
1615        return ret;
1616}
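
/*
 * Hedged sketch of the callbacks driven by the recursion above.  The parent
 * level is activated first; returning an error from ->activate() makes
 * __irq_domain_activate_irq() roll the parent levels back.  The hardware
 * programming is left as comments because it is chip specific.
 */
static int example_domain_activate(struct irq_domain *domain,
                                   struct irq_data *irqd, bool reserve)
{
        if (reserve)
                return 0;       /* reservation only, no hardware programming */

        /* ... program the routing for irqd->hwirq, return -errno on failure ... */
        return 0;
}

static void example_domain_deactivate(struct irq_domain *domain,
                                      struct irq_data *irqd)
{
        /* ... tear down the routing programmed in ->activate() ... */
}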
1617
1618/**
1619 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1620 *                             deactivate interrupt
1621 * @irq_data: outermost irq_data associated with interrupt
1622 *
1623 * It calls domain_ops->deactivate to program interrupt controllers to disable
1624 * interrupt delivery.
1625 */
1626void irq_domain_deactivate_irq(struct irq_data *irq_data)
1627{
1628        if (irqd_is_activated(irq_data)) {
1629                __irq_domain_deactivate_irq(irq_data);
1630                irqd_clr_activated(irq_data);
1631        }
1632}
1633
1634static void irq_domain_check_hierarchy(struct irq_domain *domain)
1635{
1636        /* Hierarchy irq_domains must implement the ->alloc() callback */
1637        if (domain->ops->alloc)
1638                domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1639}
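
/*
 * Hedged registration sketch: because irq_domain_check_hierarchy() keys off
 * ->alloc, providing it in the ops table is what marks a new domain with
 * IRQ_DOMAIN_FLAG_HIERARCHY.  The ops reuse the illustrative callbacks
 * sketched earlier; "example_fwnode", "example_host_data" and the size of
 * 32 interrupts are arbitrary assumptions.
 */
static const struct irq_domain_ops example_hierarchy_ops = {
        .alloc          = example_domain_alloc,
        .free           = example_domain_free,
        .activate       = example_domain_activate,
        .deactivate     = example_domain_deactivate,
};

static struct irq_domain *example_create_child(struct irq_domain *parent,
                                               struct fwnode_handle *example_fwnode,
                                               void *example_host_data)
{
        return irq_domain_create_hierarchy(parent, 0, 32, example_fwnode,
                                           &example_hierarchy_ops,
                                           example_host_data);
}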
1640
1641/**
1642 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1643 * parent has MSI remapping support
1644 * @domain: domain pointer
1645 */
1646bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1647{
1648        for (; domain; domain = domain->parent) {
1649                if (irq_domain_is_msi_remap(domain))
1650                        return true;
1651        }
1652        return false;
1653}
1654#else   /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1655/**
1656 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1657 * @domain:     domain to match
1658 * @virq:       IRQ number to get irq_data
1659 */
1660struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1661                                         unsigned int virq)
1662{
1663        struct irq_data *irq_data = irq_get_irq_data(virq);
1664
1665        return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1666}
1667EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1668
1669/**
1670 * irq_domain_set_info - Set the complete data for a @virq in @domain
1671 * @domain:             Interrupt domain to match
1672 * @virq:               IRQ number
1673 * @hwirq:              The hardware interrupt number
1674 * @chip:               The associated interrupt chip
1675 * @chip_data:          The associated interrupt chip data
1676 * @handler:            The interrupt flow handler
1677 * @handler_data:       The interrupt flow handler data
1678 * @handler_name:       The interrupt handler name
1679 */
1680void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1681                         irq_hw_number_t hwirq, struct irq_chip *chip,
1682                         void *chip_data, irq_flow_handler_t handler,
1683                         void *handler_data, const char *handler_name)
1684{
1685        irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1686        irq_set_chip_data(virq, chip_data);
1687        irq_set_handler_data(virq, handler_data);
1688}
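
/*
 * Hedged sketch: one way a non-hierarchical driver's ->map() callback could
 * use the helper above to wire up chip, chip_data and flow handler in a
 * single call.  "example_flat_chip" and the choice of handle_level_irq are
 * assumptions.
 */
static struct irq_chip example_flat_chip;

static int example_flat_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hw)
{
        irq_domain_set_info(d, virq, hw, &example_flat_chip, d->host_data,
                            handle_level_irq, NULL, NULL);
        return 0;
}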
1689
1690static void irq_domain_check_hierarchy(struct irq_domain *domain)
1691{
1692}
1693#endif  /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1694
1695#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1696static struct dentry *domain_dir;
1697
1698static void
1699irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1700{
1701        seq_printf(m, "%*sname:   %s\n", ind, "", d->name);
1702        seq_printf(m, "%*ssize:   %u\n", ind + 1, "",
1703                   d->revmap_size + d->revmap_direct_max_irq);
1704        seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
1705        seq_printf(m, "%*sflags:  0x%08x\n", ind + 1, "", d->flags);
1706        if (d->ops && d->ops->debug_show)
1707                d->ops->debug_show(m, d, NULL, ind + 1);
1708#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1709        if (!d->parent)
1710                return;
1711        seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1712        irq_domain_debug_show_one(m, d->parent, ind + 4);
1713#endif
1714}
1715
1716static int irq_domain_debug_show(struct seq_file *m, void *p)
1717{
1718        struct irq_domain *d = m->private;
1719
1720        /* Default domain? Might be NULL */
1721        if (!d) {
1722                if (!irq_default_domain)
1723                        return 0;
1724                d = irq_default_domain;
1725        }
1726        irq_domain_debug_show_one(m, d, 0);
1727        return 0;
1728}
1729DEFINE_SHOW_ATTRIBUTE(irq_domain_debug);
1730
1731static void debugfs_add_domain_dir(struct irq_domain *d)
1732{
1733        if (!d->name || !domain_dir || d->debugfs_file)
1734                return;
1735        d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
1736                                              &irq_domain_debug_fops);
1737}
1738
1739static void debugfs_remove_domain_dir(struct irq_domain *d)
1740{
1741        debugfs_remove(d->debugfs_file);
1742}
1743
1744void __init irq_domain_debugfs_init(struct dentry *root)
1745{
1746        struct irq_domain *d;
1747
1748        domain_dir = debugfs_create_dir("domains", root);
1749        if (!domain_dir)
1750                return;
1751
1752        debugfs_create_file("default", 0444, domain_dir, NULL,
1753                            &irq_domain_debug_fops);
1754        mutex_lock(&irq_domain_mutex);
1755        list_for_each_entry(d, &irq_domain_list, link)
1756                debugfs_add_domain_dir(d);
1757        mutex_unlock(&irq_domain_mutex);
1758}
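
/*
 * With CONFIG_GENERIC_IRQ_DEBUGFS enabled and debugfs mounted at the usual
 * /sys/kernel/debug, the files created above typically show up as
 * /sys/kernel/debug/irq/domains/<domain-name>, e.g.:
 *
 *      cat /sys/kernel/debug/irq/domains/default
 */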
1759#endif
1760