linux/kernel/irq/irqdomain.c
   1#define pr_fmt(fmt)  "irq: " fmt
   2
   3#include <linux/acpi.h>
   4#include <linux/debugfs.h>
   5#include <linux/hardirq.h>
   6#include <linux/interrupt.h>
   7#include <linux/irq.h>
   8#include <linux/irqdesc.h>
   9#include <linux/irqdomain.h>
  10#include <linux/module.h>
  11#include <linux/mutex.h>
  12#include <linux/of.h>
  13#include <linux/of_address.h>
  14#include <linux/of_irq.h>
  15#include <linux/topology.h>
  16#include <linux/seq_file.h>
  17#include <linux/slab.h>
  18#include <linux/smp.h>
  19#include <linux/fs.h>
  20
  21static LIST_HEAD(irq_domain_list);
  22static DEFINE_MUTEX(irq_domain_mutex);
  23
  24static DEFINE_MUTEX(revmap_trees_mutex);
  25static struct irq_domain *irq_default_domain;
  26
  27static void irq_domain_check_hierarchy(struct irq_domain *domain);
  28
  29struct irqchip_fwid {
  30        struct fwnode_handle    fwnode;
  31        unsigned int            type;
  32        char                    *name;
  33        void *data;
  34};
  35
  36#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  37static void debugfs_add_domain_dir(struct irq_domain *d);
  38static void debugfs_remove_domain_dir(struct irq_domain *d);
  39#else
  40static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
  41static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
  42#endif
  43
  44const struct fwnode_operations irqchip_fwnode_ops;
  45EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  46
  47/**
  48 * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  49 *                           identifying an irq domain
  50 * @type:       Type of irqchip_fwnode. See linux/irqdomain.h
  51 * @name:       Optional user provided domain name
  52 * @id:         Optional user provided id if name != NULL
  53 * @data:       Optional user-provided data
  54 *
  55 * Allocate a struct irqchip_fwid, and return a pointer to the embedded
  56 * fwnode_handle (or NULL on failure).
  57 *
  58 * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  59 * solely to transport name information to irqdomain creation code. The
  60 * node is not stored. For other types the pointer is kept in the irq
  61 * domain struct.
  62 */
  63struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  64                                                const char *name, void *data)
  65{
  66        struct irqchip_fwid *fwid;
  67        char *n;
  68
  69        fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  70
  71        switch (type) {
  72        case IRQCHIP_FWNODE_NAMED:
  73                n = kasprintf(GFP_KERNEL, "%s", name);
  74                break;
  75        case IRQCHIP_FWNODE_NAMED_ID:
  76                n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  77                break;
  78        default:
  79                n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  80                break;
  81        }
  82
  83        if (!fwid || !n) {
  84                kfree(fwid);
  85                kfree(n);
  86                return NULL;
  87        }
  88
  89        fwid->type = type;
  90        fwid->name = n;
  91        fwid->data = data;
  92        fwid->fwnode.ops = &irqchip_fwnode_ops;
  93        return &fwid->fwnode;
  94}
  95EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
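
/*
 * Usage sketch: a driver without an OF or ACPI node can allocate a named
 * fwnode to identify its irq domain and must free it once the domain is
 * gone.  The identifiers demo_fwid_* and the "demo-irqchip" name below
 * are purely illustrative assumptions, not part of this API.
 */
static struct fwnode_handle *demo_fwid;

static int demo_fwid_create(void)
{
        /* IRQCHIP_FWNODE_NAMED only transports the name, see above */
        demo_fwid = __irq_domain_alloc_fwnode(IRQCHIP_FWNODE_NAMED, 0,
                                              "demo-irqchip", NULL);
        return demo_fwid ? 0 : -ENOMEM;
}

static void demo_fwid_destroy(void)
{
        irq_domain_free_fwnode(demo_fwid);
        demo_fwid = NULL;
}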
  96
  97/**
  98 * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
  99 *
 100 * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
 101 */
 102void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
 103{
 104        struct irqchip_fwid *fwid;
 105
 106        if (WARN_ON(!is_fwnode_irqchip(fwnode)))
 107                return;
 108
 109        fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 110        kfree(fwid->name);
 111        kfree(fwid);
 112}
 113EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
 114
 115/**
 116 * __irq_domain_add() - Allocate a new irq_domain data structure
 117 * @fwnode: firmware node for the interrupt controller
 118 * @size: Size of linear map; 0 for radix mapping only
 119 * @hwirq_max: Maximum number of interrupts supported by controller
 120 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 121 *              direct mapping
 122 * @ops: domain callbacks
 123 * @host_data: Controller private data pointer
 124 *
 125 * Allocates and initializes an irq_domain structure.
 126 * Returns pointer to IRQ domain, or NULL on failure.
 127 */
 128struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
 129                                    irq_hw_number_t hwirq_max, int direct_max,
 130                                    const struct irq_domain_ops *ops,
 131                                    void *host_data)
 132{
 133        struct device_node *of_node = to_of_node(fwnode);
 134        struct irqchip_fwid *fwid;
 135        struct irq_domain *domain;
 136
 137        static atomic_t unknown_domains;
 138
 139        domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
 140                              GFP_KERNEL, of_node_to_nid(of_node));
 141        if (WARN_ON(!domain))
 142                return NULL;
 143
 144        if (fwnode && is_fwnode_irqchip(fwnode)) {
 145                fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 146
 147                switch (fwid->type) {
 148                case IRQCHIP_FWNODE_NAMED:
 149                case IRQCHIP_FWNODE_NAMED_ID:
 150                        domain->name = kstrdup(fwid->name, GFP_KERNEL);
 151                        if (!domain->name) {
 152                                kfree(domain);
 153                                return NULL;
 154                        }
 155                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 156                        break;
 157                default:
 158                        domain->fwnode = fwnode;
 159                        domain->name = fwid->name;
 160                        break;
 161                }
 162#ifdef CONFIG_ACPI
 163        } else if (is_acpi_device_node(fwnode)) {
 164                struct acpi_buffer buf = {
 165                        .length = ACPI_ALLOCATE_BUFFER,
 166                };
 167                acpi_handle handle;
 168
 169                handle = acpi_device_handle(to_acpi_device_node(fwnode));
 170                if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
 171                        domain->name = buf.pointer;
 172                        domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 173                }
 174
 175                domain->fwnode = fwnode;
 176#endif
 177        } else if (of_node) {
 178                char *name;
 179
 180                /*
 181                 * DT paths contain '/', which debugfs is legitimately
 182                 * unhappy about. Replace them with ':', which does
 183                 * the trick and is not as offensive as '\'...
 184                 */
 185                name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
 186                if (!name) {
 187                        kfree(domain);
 188                        return NULL;
 189                }
 190
 191                strreplace(name, '/', ':');
 192
 193                domain->name = name;
 194                domain->fwnode = fwnode;
 195                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 196        }
 197
 198        if (!domain->name) {
 199                if (fwnode)
 200                        pr_err("Invalid fwnode type for irqdomain\n");
 201                domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
 202                                         atomic_inc_return(&unknown_domains));
 203                if (!domain->name) {
 204                        kfree(domain);
 205                        return NULL;
 206                }
 207                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 208        }
 209
 210        of_node_get(of_node);
 211
 212        /* Fill structure */
 213        INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
 214        domain->ops = ops;
 215        domain->host_data = host_data;
 216        domain->hwirq_max = hwirq_max;
 217        domain->revmap_size = size;
 218        domain->revmap_direct_max_irq = direct_max;
 219        irq_domain_check_hierarchy(domain);
 220
 221        mutex_lock(&irq_domain_mutex);
 222        debugfs_add_domain_dir(domain);
 223        list_add(&domain->link, &irq_domain_list);
 224        mutex_unlock(&irq_domain_mutex);
 225
 226        pr_debug("Added domain %s\n", domain->name);
 227        return domain;
 228}
 229EXPORT_SYMBOL_GPL(__irq_domain_add);
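
/*
 * Usage sketch: callers normally reach __irq_domain_add() through wrappers
 * such as irq_domain_add_linear() or irq_domain_add_tree() from
 * <linux/irqdomain.h>; a direct call for a 32-entry linear revmap looks
 * roughly like this.  demo_linear_ops and the size are illustrative.
 */
static struct irq_domain *demo_linear_domain(struct fwnode_handle *fwnode,
                                             const struct irq_domain_ops *demo_linear_ops,
                                             void *priv)
{
        /* size == hwirq_max == 32, no direct mapping */
        return __irq_domain_add(fwnode, 32, 32, 0, demo_linear_ops, priv);
}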
 230
 231/**
 232 * irq_domain_remove() - Remove an irq domain.
 233 * @domain: domain to remove
 234 *
 235 * This routine is used to remove an irq domain. The caller must ensure
 236 * that all mappings within the domain have been disposed of prior to
 237 * calling this routine, depending on the revmap type.
 238 */
 239void irq_domain_remove(struct irq_domain *domain)
 240{
 241        mutex_lock(&irq_domain_mutex);
 242        debugfs_remove_domain_dir(domain);
 243
 244        WARN_ON(!radix_tree_empty(&domain->revmap_tree));
 245
 246        list_del(&domain->link);
 247
 248        /*
 249         * If the going away domain is the default one, reset it.
 250         */
 251        if (unlikely(irq_default_domain == domain))
 252                irq_set_default_host(NULL);
 253
 254        mutex_unlock(&irq_domain_mutex);
 255
 256        pr_debug("Removed domain %s\n", domain->name);
 257
 258        of_node_put(irq_domain_get_of_node(domain));
 259        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 260                kfree(domain->name);
 261        kfree(domain);
 262}
 263EXPORT_SYMBOL_GPL(irq_domain_remove);
 264
 265void irq_domain_update_bus_token(struct irq_domain *domain,
 266                                 enum irq_domain_bus_token bus_token)
 267{
 268        char *name;
 269
 270        if (domain->bus_token == bus_token)
 271                return;
 272
 273        mutex_lock(&irq_domain_mutex);
 274
 275        domain->bus_token = bus_token;
 276
 277        name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
 278        if (!name) {
 279                mutex_unlock(&irq_domain_mutex);
 280                return;
 281        }
 282
 283        debugfs_remove_domain_dir(domain);
 284
 285        if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
 286                kfree(domain->name);
 287        else
 288                domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
 289
 290        domain->name = name;
 291        debugfs_add_domain_dir(domain);
 292
 293        mutex_unlock(&irq_domain_mutex);
 294}
 295
 296/**
 297 * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
 298 * @of_node: pointer to interrupt controller's device tree node.
 299 * @size: total number of irqs in mapping
 300 * @first_irq: first number of irq block assigned to the domain,
 301 *      pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
 302 *      pre-map all of the irqs in the domain to virqs starting at first_irq.
 303 * @ops: domain callbacks
 304 * @host_data: Controller private data pointer
 305 *
 306 * Allocates an irq_domain and, if first_irq is positive, also allocates
 307 * irq_descs and maps all of the hwirqs to virqs starting at first_irq.
 308 *
 309 * This is intended to implement the expected behaviour for most
 310 * interrupt controllers. If device tree is used, then first_irq will be 0 and
 311 * irqs get mapped dynamically on the fly. However, if the controller requires
 312 * static virq assignments (non-DT boot) then it will set that up correctly.
 313 */
 314struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 315                                         unsigned int size,
 316                                         unsigned int first_irq,
 317                                         const struct irq_domain_ops *ops,
 318                                         void *host_data)
 319{
 320        struct irq_domain *domain;
 321
 322        domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
 323        if (!domain)
 324                return NULL;
 325
 326        if (first_irq > 0) {
 327                if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
 328                        /* attempt to allocate irq_descs */
 329                        int rc = irq_alloc_descs(first_irq, first_irq, size,
 330                                                 of_node_to_nid(of_node));
 331                        if (rc < 0)
 332                                pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 333                                        first_irq);
 334                }
 335                irq_domain_associate_many(domain, first_irq, 0, size);
 336        }
 337
 338        return domain;
 339}
 340EXPORT_SYMBOL_GPL(irq_domain_add_simple);
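
/*
 * Usage sketch: a typical DT driver passes first_irq == 0 and lets virqs be
 * allocated on demand.  demo_simple_chip (mask/unmask callbacks omitted),
 * demo_simple_map() and the count of 16 are illustrative assumptions.
 */
static struct irq_chip demo_simple_chip = {
        .name = "demo-simple",
};

static int demo_simple_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &demo_simple_chip, handle_level_irq);
        return 0;
}

static const struct irq_domain_ops demo_simple_ops = {
        .map    = demo_simple_map,
        .xlate  = irq_domain_xlate_onecell,
};

static struct irq_domain *demo_simple_probe(struct device_node *np)
{
        /* 16 hwirqs, dynamic virq allocation (first_irq == 0) */
        return irq_domain_add_simple(np, 16, 0, &demo_simple_ops, NULL);
}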
 341
 342/**
 343 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 344 * @of_node: pointer to interrupt controller's device tree node.
 345 * @size: total number of irqs in legacy mapping
 346 * @first_irq: first number of irq block assigned to the domain
 347 * @first_hwirq: first hwirq number to use for the translation. Should normally
 348 *               be '0', but a positive integer can be used if the effective
 349 *               hwirq numbering does not begin at zero.
 350 * @ops: map/unmap domain callbacks
 351 * @host_data: Controller private data pointer
 352 *
 353 * Note: the map() callback will be called before this function returns
 354 * for all legacy interrupts except 0 (which is always the invalid irq for
 355 * a legacy controller).
 356 */
 357struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
 358                                         unsigned int size,
 359                                         unsigned int first_irq,
 360                                         irq_hw_number_t first_hwirq,
 361                                         const struct irq_domain_ops *ops,
 362                                         void *host_data)
 363{
 364        struct irq_domain *domain;
 365
 366        domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
 367                                  first_hwirq + size, 0, ops, host_data);
 368        if (domain)
 369                irq_domain_associate_many(domain, first_irq, first_hwirq, size);
 370
 371        return domain;
 372}
 373EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
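
/*
 * Usage sketch: a non-DT board with 16 interrupts whose descriptors start
 * at Linux IRQ 32 and whose hardware numbering starts at 0.  The ops
 * pointer and every number here are illustrative assumptions.
 */
static struct irq_domain *demo_legacy_init(const struct irq_domain_ops *ops)
{
        /* virqs 32..47 <-> hwirqs 0..15, associated before this returns */
        return irq_domain_add_legacy(NULL, 16, 32, 0, ops, NULL);
}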
 374
 375/**
 376 * irq_find_matching_fwspec() - Locates a domain for a given fwspec
 377 * @fwspec: FW specifier for an interrupt
 378 * @bus_token: bus token to match; DOMAIN_BUS_ANY matches any domain
 379 */
 380struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 381                                            enum irq_domain_bus_token bus_token)
 382{
 383        struct irq_domain *h, *found = NULL;
 384        struct fwnode_handle *fwnode = fwspec->fwnode;
 385        int rc;
 386
 387        /* We might want to match the legacy controller last since
 388         * it might potentially be set to match all interrupts in
 389         * the absence of a device node. This isn't a problem so far
 390         * yet though...
 391         *
 392         * bus_token == DOMAIN_BUS_ANY matches any domain, any other
 393         * values must generate an exact match for the domain to be
 394         * selected.
 395         */
 396        mutex_lock(&irq_domain_mutex);
 397        list_for_each_entry(h, &irq_domain_list, link) {
 398                if (h->ops->select && fwspec->param_count)
 399                        rc = h->ops->select(h, fwspec, bus_token);
 400                else if (h->ops->match)
 401                        rc = h->ops->match(h, to_of_node(fwnode), bus_token);
 402                else
 403                        rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
 404                              ((bus_token == DOMAIN_BUS_ANY) ||
 405                               (h->bus_token == bus_token)));
 406
 407                if (rc) {
 408                        found = h;
 409                        break;
 410                }
 411        }
 412        mutex_unlock(&irq_domain_mutex);
 413        return found;
 414}
 415EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
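
/*
 * Lookup sketch: build an irq_fwspec by hand and search for the wired
 * domain that owns it, falling back to any bus token.  The one-cell
 * specifier used here is an illustrative assumption.
 */
static struct irq_domain *demo_find_domain(struct fwnode_handle *fwnode,
                                           u32 hwirq)
{
        struct irq_fwspec fwspec = {
                .fwnode         = fwnode,
                .param_count    = 1,
                .param          = { hwirq },
        };
        struct irq_domain *d;

        d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_WIRED);
        if (!d)
                d = irq_find_matching_fwspec(&fwspec, DOMAIN_BUS_ANY);
        return d;
}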
 416
 417/**
 418 * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
 419 * IRQ remapping
 420 *
 421 * Return: false if any MSI irq domain does not support IRQ remapping,
 422 * true otherwise (including if there is no MSI irq domain)
 423 */
 424bool irq_domain_check_msi_remap(void)
 425{
 426        struct irq_domain *h;
 427        bool ret = true;
 428
 429        mutex_lock(&irq_domain_mutex);
 430        list_for_each_entry(h, &irq_domain_list, link) {
 431                if (irq_domain_is_msi(h) &&
 432                    !irq_domain_hierarchical_is_msi_remap(h)) {
 433                        ret = false;
 434                        break;
 435                }
 436        }
 437        mutex_unlock(&irq_domain_mutex);
 438        return ret;
 439}
 440EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
 441
 442/**
 443 * irq_set_default_host() - Set a "default" irq domain
 444 * @domain: default domain pointer
 445 *
 446 * For convenience, it's possible to set a "default" domain that will be used
 447 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 448 * platforms that want to manipulate a few hard coded interrupt numbers that
 449 * aren't properly represented in the device-tree.
 450 */
 451void irq_set_default_host(struct irq_domain *domain)
 452{
 453        pr_debug("Default domain set to @0x%p\n", domain);
 454
 455        irq_default_domain = domain;
 456}
 457EXPORT_SYMBOL_GPL(irq_set_default_host);
 458
 459static void irq_domain_clear_mapping(struct irq_domain *domain,
 460                                     irq_hw_number_t hwirq)
 461{
 462        if (hwirq < domain->revmap_size) {
 463                domain->linear_revmap[hwirq] = 0;
 464        } else {
 465                mutex_lock(&revmap_trees_mutex);
 466                radix_tree_delete(&domain->revmap_tree, hwirq);
 467                mutex_unlock(&revmap_trees_mutex);
 468        }
 469}
 470
 471static void irq_domain_set_mapping(struct irq_domain *domain,
 472                                   irq_hw_number_t hwirq,
 473                                   struct irq_data *irq_data)
 474{
 475        if (hwirq < domain->revmap_size) {
 476                domain->linear_revmap[hwirq] = irq_data->irq;
 477        } else {
 478                mutex_lock(&revmap_trees_mutex);
 479                radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
 480                mutex_unlock(&revmap_trees_mutex);
 481        }
 482}
 483
 484void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 485{
 486        struct irq_data *irq_data = irq_get_irq_data(irq);
 487        irq_hw_number_t hwirq;
 488
 489        if (WARN(!irq_data || irq_data->domain != domain,
 490                 "virq%i doesn't exist; cannot disassociate\n", irq))
 491                return;
 492
 493        hwirq = irq_data->hwirq;
 494        irq_set_status_flags(irq, IRQ_NOREQUEST);
 495
 496        /* remove chip and handler */
 497        irq_set_chip_and_handler(irq, NULL, NULL);
 498
 499        /* Make sure it's completed */
 500        synchronize_irq(irq);
 501
 502        /* Tell the PIC about it */
 503        if (domain->ops->unmap)
 504                domain->ops->unmap(domain, irq);
 505        smp_mb();
 506
 507        irq_data->domain = NULL;
 508        irq_data->hwirq = 0;
 509        domain->mapcount--;
 510
 511        /* Clear reverse map for this hwirq */
 512        irq_domain_clear_mapping(domain, hwirq);
 513}
 514
 515int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 516                         irq_hw_number_t hwirq)
 517{
 518        struct irq_data *irq_data = irq_get_irq_data(virq);
 519        int ret;
 520
 521        if (WARN(hwirq >= domain->hwirq_max,
 522                 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
 523                return -EINVAL;
 524        if (WARN(!irq_data, "error: virq%i is not allocated", virq))
 525                return -EINVAL;
 526        if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
 527                return -EINVAL;
 528
 529        mutex_lock(&irq_domain_mutex);
 530        irq_data->hwirq = hwirq;
 531        irq_data->domain = domain;
 532        if (domain->ops->map) {
 533                ret = domain->ops->map(domain, virq, hwirq);
 534                if (ret != 0) {
 535                        /*
 536                         * If map() returns -EPERM, this interrupt is protected
 537                         * by the firmware or some other service and shall not
 538                         * be mapped. Don't bother telling the user about it.
 539                         */
 540                        if (ret != -EPERM) {
 541                                pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
 542                                       domain->name, hwirq, virq, ret);
 543                        }
 544                        irq_data->domain = NULL;
 545                        irq_data->hwirq = 0;
 546                        mutex_unlock(&irq_domain_mutex);
 547                        return ret;
 548                }
 549
 550                /* If not already assigned, give the domain the chip's name */
 551                if (!domain->name && irq_data->chip)
 552                        domain->name = irq_data->chip->name;
 553        }
 554
 555        domain->mapcount++;
 556        irq_domain_set_mapping(domain, hwirq, irq_data);
 557        mutex_unlock(&irq_domain_mutex);
 558
 559        irq_clear_status_flags(virq, IRQ_NOREQUEST);
 560
 561        return 0;
 562}
 563EXPORT_SYMBOL_GPL(irq_domain_associate);
 564
 565void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 566                               irq_hw_number_t hwirq_base, int count)
 567{
 568        struct device_node *of_node;
 569        int i;
 570
 571        of_node = irq_domain_get_of_node(domain);
 572        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
 573                of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
 574
 575        for (i = 0; i < count; i++) {
 576                irq_domain_associate(domain, irq_base + i, hwirq_base + i);
 577        }
 578}
 579EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 580
 581/**
 582 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 583 * @domain: domain to allocate the irq for or NULL for default domain
 584 *
 585 * This routine is used for irq controllers which can choose the hardware
 586 * interrupt numbers they generate. In such a case it's simplest to use
 587 * the linux irq as the hardware interrupt number. It still uses the linear
 588 * or radix tree to store the mapping, but the irq controller can optimize
 589 * the revmap path by using the hwirq directly.
 590 */
 591unsigned int irq_create_direct_mapping(struct irq_domain *domain)
 592{
 593        struct device_node *of_node;
 594        unsigned int virq;
 595
 596        if (domain == NULL)
 597                domain = irq_default_domain;
 598
 599        of_node = irq_domain_get_of_node(domain);
 600        virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
 601        if (!virq) {
 602                pr_debug("create_direct virq allocation failed\n");
 603                return 0;
 604        }
 605        if (virq >= domain->revmap_direct_max_irq) {
 606                pr_err("ERROR: no free irqs available below %i maximum\n",
 607                        domain->revmap_direct_max_irq);
 608                irq_free_desc(virq);
 609                return 0;
 610        }
 611        pr_debug("create_direct obtained virq %d\n", virq);
 612
 613        if (irq_domain_associate(domain, virq, virq)) {
 614                irq_free_desc(virq);
 615                return 0;
 616        }
 617
 618        return virq;
 619}
 620EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
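
/*
 * Usage sketch for a controller that can pick its own hwirq numbers: the
 * returned virq doubles as the hwirq.  demo_direct_domain is assumed to
 * have been created with a large enough revmap_direct_max_irq.
 */
static unsigned int demo_direct_alloc(struct irq_domain *demo_direct_domain)
{
        unsigned int virq = irq_create_direct_mapping(demo_direct_domain);

        if (!virq)
                pr_err("demo: direct mapping failed\n");
        return virq;
}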
 621
 622/**
 623 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 624 * @domain: domain owning this hardware interrupt or NULL for default domain
 625 * @hwirq: hardware irq number in that domain space
 626 *
 627 * Only one mapping per hardware interrupt is permitted. Returns a linux
 628 * irq number.
 629 * If the sense/trigger is to be specified, set_irq_type() should be called
 630 * on the number returned from that call.
 631 */
 632unsigned int irq_create_mapping(struct irq_domain *domain,
 633                                irq_hw_number_t hwirq)
 634{
 635        struct device_node *of_node;
 636        int virq;
 637
 638        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
 639
 640        /* Look for default domain if necessary */
 641        if (domain == NULL)
 642                domain = irq_default_domain;
 643        if (domain == NULL) {
 644                WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
 645                return 0;
 646        }
 647        pr_debug("-> using domain @%p\n", domain);
 648
 649        of_node = irq_domain_get_of_node(domain);
 650
 651        /* Check if mapping already exists */
 652        virq = irq_find_mapping(domain, hwirq);
 653        if (virq) {
 654                pr_debug("-> existing mapping on virq %d\n", virq);
 655                return virq;
 656        }
 657
 658        /* Allocate a virtual interrupt number */
 659        virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
 660        if (virq <= 0) {
 661                pr_debug("-> virq allocation failed\n");
 662                return 0;
 663        }
 664
 665        if (irq_domain_associate(domain, virq, hwirq)) {
 666                irq_free_desc(virq);
 667                return 0;
 668        }
 669
 670        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
 671                hwirq, of_node_full_name(of_node), virq);
 672
 673        return virq;
 674}
 675EXPORT_SYMBOL_GPL(irq_create_mapping);
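
/*
 * Mapping lifecycle sketch: translate a hardware interrupt number into a
 * Linux irq, request it, and tear everything down again when done.  The
 * demo_mapping_* helpers and the "demo" devname are illustrative.
 */
static int demo_mapping_request(struct irq_domain *d, irq_hw_number_t hwirq,
                                irq_handler_t handler, void *dev)
{
        unsigned int virq = irq_create_mapping(d, hwirq);

        if (!virq)
                return -ENOSPC;
        return request_irq(virq, handler, 0, "demo", dev);
}

static void demo_mapping_release(struct irq_domain *d, irq_hw_number_t hwirq,
                                 void *dev)
{
        unsigned int virq = irq_find_mapping(d, hwirq);

        if (!virq)
                return;
        free_irq(virq, dev);
        irq_dispose_mapping(virq);
}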
 676
 677/**
 678 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 679 * @domain: domain owning the interrupt range
 680 * @irq_base: beginning of linux IRQ range
 681 * @hwirq_base: beginning of hardware IRQ range
 682 * @count: Number of interrupts to map
 683 *
 684 * This routine is used for allocating and mapping a range of hardware
 685 * irqs to linux irqs where the linux irq numbers are at pre-defined
 686 * locations. For use by controllers that already have static mappings
 687 * to insert in to the domain.
 688 *
 689 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 690 * domain insertion.
 691 *
 692 * 0 is returned upon success, while any failure to establish a static
 693 * mapping is treated as an error.
 694 */
 695int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 696                               irq_hw_number_t hwirq_base, int count)
 697{
 698        struct device_node *of_node;
 699        int ret;
 700
 701        of_node = irq_domain_get_of_node(domain);
 702        ret = irq_alloc_descs(irq_base, irq_base, count,
 703                              of_node_to_nid(of_node));
 704        if (unlikely(ret < 0))
 705                return ret;
 706
 707        irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 708        return 0;
 709}
 710EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
 711
 712static int irq_domain_translate(struct irq_domain *d,
 713                                struct irq_fwspec *fwspec,
 714                                irq_hw_number_t *hwirq, unsigned int *type)
 715{
 716#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 717        if (d->ops->translate)
 718                return d->ops->translate(d, fwspec, hwirq, type);
 719#endif
 720        if (d->ops->xlate)
 721                return d->ops->xlate(d, to_of_node(fwspec->fwnode),
 722                                     fwspec->param, fwspec->param_count,
 723                                     hwirq, type);
 724
 725        /* If domain has no translation, then we assume interrupt line */
 726        *hwirq = fwspec->param[0];
 727        return 0;
 728}
 729
 730static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
 731                                      struct irq_fwspec *fwspec)
 732{
 733        int i;
 734
 735        fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
 736        fwspec->param_count = irq_data->args_count;
 737
 738        for (i = 0; i < irq_data->args_count; i++)
 739                fwspec->param[i] = irq_data->args[i];
 740}
 741
 742unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 743{
 744        struct irq_domain *domain;
 745        struct irq_data *irq_data;
 746        irq_hw_number_t hwirq;
 747        unsigned int type = IRQ_TYPE_NONE;
 748        int virq;
 749
 750        if (fwspec->fwnode) {
 751                domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
 752                if (!domain)
 753                        domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
 754        } else {
 755                domain = irq_default_domain;
 756        }
 757
 758        if (!domain) {
 759                pr_warn("no irq domain found for %s !\n",
 760                        of_node_full_name(to_of_node(fwspec->fwnode)));
 761                return 0;
 762        }
 763
 764        if (irq_domain_translate(domain, fwspec, &hwirq, &type))
 765                return 0;
 766
 767        /*
 768         * WARN if the irqchip returns a type with bits
 769         * outside the sense mask set and clear these bits.
 770         */
 771        if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
 772                type &= IRQ_TYPE_SENSE_MASK;
 773
 774        /*
 775         * If we've already configured this interrupt,
 776         * don't do it again, or hell will break loose.
 777         */
 778        virq = irq_find_mapping(domain, hwirq);
 779        if (virq) {
 780                /*
 781                 * If the trigger type is not specified or matches the
 782                 * current trigger type then we are done so return the
 783                 * interrupt number.
 784                 */
 785                if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
 786                        return virq;
 787
 788                /*
 789                 * If the trigger type has not been set yet, then set
 790                 * it now and return the interrupt number.
 791                 */
 792                if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
 793                        irq_data = irq_get_irq_data(virq);
 794                        if (!irq_data)
 795                                return 0;
 796
 797                        irqd_set_trigger_type(irq_data, type);
 798                        return virq;
 799                }
 800
 801                pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
 802                        hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
 803                return 0;
 804        }
 805
 806        if (irq_domain_is_hierarchy(domain)) {
 807                virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
 808                if (virq <= 0)
 809                        return 0;
 810        } else {
 811                /* Create mapping */
 812                virq = irq_create_mapping(domain, hwirq);
 813                if (!virq)
 814                        return virq;
 815        }
 816
 817        irq_data = irq_get_irq_data(virq);
 818        if (!irq_data) {
 819                if (irq_domain_is_hierarchy(domain))
 820                        irq_domain_free_irqs(virq, 1);
 821                else
 822                        irq_dispose_mapping(virq);
 823                return 0;
 824        }
 825
 826        /* Store trigger type */
 827        irqd_set_trigger_type(irq_data, type);
 828
 829        return virq;
 830}
 831EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
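
/*
 * Consumer sketch: describe the interrupt as an irq_fwspec (here two
 * cells, hwirq plus trigger type) and let the core locate the domain,
 * translate the specifier and create the mapping.  The two-cell layout
 * is an illustrative assumption.
 */
static unsigned int demo_fwspec_map(struct fwnode_handle *fwnode,
                                    u32 hwirq, u32 trigger)
{
        struct irq_fwspec fwspec = {
                .fwnode         = fwnode,
                .param_count    = 2,
                .param          = { hwirq, trigger },
        };

        return irq_create_fwspec_mapping(&fwspec);
}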
 832
 833unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
 834{
 835        struct irq_fwspec fwspec;
 836
 837        of_phandle_args_to_fwspec(irq_data, &fwspec);
 838        return irq_create_fwspec_mapping(&fwspec);
 839}
 840EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 841
 842/**
 843 * irq_dispose_mapping() - Unmap an interrupt
 844 * @virq: linux irq number of the interrupt to unmap
 845 */
 846void irq_dispose_mapping(unsigned int virq)
 847{
 848        struct irq_data *irq_data = irq_get_irq_data(virq);
 849        struct irq_domain *domain;
 850
 851        if (!virq || !irq_data)
 852                return;
 853
 854        domain = irq_data->domain;
 855        if (WARN_ON(domain == NULL))
 856                return;
 857
 858        if (irq_domain_is_hierarchy(domain)) {
 859                irq_domain_free_irqs(virq, 1);
 860        } else {
 861                irq_domain_disassociate(domain, virq);
 862                irq_free_desc(virq);
 863        }
 864}
 865EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 866
 867/**
 868 * irq_find_mapping() - Find a linux irq from a hw irq number.
 869 * @domain: domain owning this hardware interrupt
 870 * @hwirq: hardware irq number in that domain space
 871 */
 872unsigned int irq_find_mapping(struct irq_domain *domain,
 873                              irq_hw_number_t hwirq)
 874{
 875        struct irq_data *data;
 876
 877        /* Look for default domain if necessary */
 878        if (domain == NULL)
 879                domain = irq_default_domain;
 880        if (domain == NULL)
 881                return 0;
 882
 883        if (hwirq < domain->revmap_direct_max_irq) {
 884                data = irq_domain_get_irq_data(domain, hwirq);
 885                if (data && data->hwirq == hwirq)
 886                        return hwirq;
 887        }
 888
 889        /* Check if the hwirq is in the linear revmap. */
 890        if (hwirq < domain->revmap_size)
 891                return domain->linear_revmap[hwirq];
 892
 893        rcu_read_lock();
 894        data = radix_tree_lookup(&domain->revmap_tree, hwirq);
 895        rcu_read_unlock();
 896        return data ? data->irq : 0;
 897}
 898EXPORT_SYMBOL_GPL(irq_find_mapping);
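
/*
 * Reverse-map sketch: a chained handler reads the pending hwirq from its
 * hardware, converts it to a Linux irq with irq_find_mapping() and
 * forwards it.  demo_chained_domain and demo_chained_pending() are
 * hypothetical; chained_irq_enter()/exit() come from
 * <linux/irqchip/chained_irq.h>.
 */
static struct irq_domain *demo_chained_domain;
static irq_hw_number_t demo_chained_pending(void);

static void demo_chained_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int virq;

        chained_irq_enter(chip, desc);
        virq = irq_find_mapping(demo_chained_domain, demo_chained_pending());
        if (virq)
                generic_handle_irq(virq);
        chained_irq_exit(chip, desc);
}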
 899
 900#ifdef CONFIG_IRQ_DOMAIN_DEBUG
 901static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
 902{
 903        struct irq_domain *domain;
 904        struct irq_data *data;
 905
 906        domain = desc->irq_data.domain;
 907        data = &desc->irq_data;
 908
 909        while (domain) {
 910                unsigned int irq = data->irq;
 911                unsigned long hwirq = data->hwirq;
 912                struct irq_chip *chip;
 913                bool direct;
 914
 915                if (data == &desc->irq_data)
 916                        seq_printf(m, "%5d  ", irq);
 917                else
 918                        seq_printf(m, "%5d+ ", irq);
 919                seq_printf(m, "0x%05lx  ", hwirq);
 920
 921                chip = irq_data_get_irq_chip(data);
 922                seq_printf(m, "%-15s  ", (chip && chip->name) ? chip->name : "none");
 923
 924                seq_printf(m, data ? "0x%p  " : "  %p  ",
 925                           irq_data_get_irq_chip_data(data));
 926
 927                seq_printf(m, "   %c    ", (desc->action && desc->action->handler) ? '*' : ' ');
 928                direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
 929                seq_printf(m, "%6s%-8s  ",
 930                           (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
 931                           direct ? "(DIRECT)" : "");
 932                seq_printf(m, "%s\n", domain->name);
 933#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 934                domain = domain->parent;
 935                data = data->parent_data;
 936#else
 937                domain = NULL;
 938#endif
 939        }
 940}
 941
 942static int virq_debug_show(struct seq_file *m, void *private)
 943{
 944        unsigned long flags;
 945        struct irq_desc *desc;
 946        struct irq_domain *domain;
 947        struct radix_tree_iter iter;
 948        void __rcu **slot;
 949        int i;
 950
 951        seq_printf(m, " %-16s  %-6s  %-10s  %-10s  %s\n",
 952                   "name", "mapped", "linear-max", "direct-max", "devtree-node");
 953        mutex_lock(&irq_domain_mutex);
 954        list_for_each_entry(domain, &irq_domain_list, link) {
 955                struct device_node *of_node;
 956                const char *name;
 957
 958                int count = 0;
 959
 960                of_node = irq_domain_get_of_node(domain);
 961                if (of_node)
 962                        name = of_node_full_name(of_node);
 963                else if (is_fwnode_irqchip(domain->fwnode))
 964                        name = container_of(domain->fwnode, struct irqchip_fwid,
 965                                            fwnode)->name;
 966                else
 967                        name = "";
 968
 969                radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
 970                        count++;
 971                seq_printf(m, "%c%-16s  %6u  %10u  %10u  %s\n",
 972                           domain == irq_default_domain ? '*' : ' ', domain->name,
 973                           domain->revmap_size + count, domain->revmap_size,
 974                           domain->revmap_direct_max_irq,
 975                           name);
 976        }
 977        mutex_unlock(&irq_domain_mutex);
 978
 979        seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %6s  %-14s  %s\n", "irq", "hwirq",
 980                      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
 981                      "active", "type", "domain");
 982
 983        for (i = 1; i < nr_irqs; i++) {
 984                desc = irq_to_desc(i);
 985                if (!desc)
 986                        continue;
 987
 988                raw_spin_lock_irqsave(&desc->lock, flags);
 989                virq_debug_show_one(m, desc);
 990                raw_spin_unlock_irqrestore(&desc->lock, flags);
 991        }
 992
 993        return 0;
 994}
 995
 996static int virq_debug_open(struct inode *inode, struct file *file)
 997{
 998        return single_open(file, virq_debug_show, inode->i_private);
 999}
1000
1001static const struct file_operations virq_debug_fops = {
1002        .open = virq_debug_open,
1003        .read = seq_read,
1004        .llseek = seq_lseek,
1005        .release = single_release,
1006};
1007
1008static int __init irq_debugfs_init(void)
1009{
1010        if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
1011                                 NULL, &virq_debug_fops) == NULL)
1012                return -ENOMEM;
1013
1014        return 0;
1015}
1016__initcall(irq_debugfs_init);
1017#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
1018
1019/**
1020 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
1021 *
1022 * Device Tree IRQ specifier translation function which works with one cell
1023 * bindings where the cell value maps directly to the hwirq number.
1024 */
1025int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
1026                             const u32 *intspec, unsigned int intsize,
1027                             unsigned long *out_hwirq, unsigned int *out_type)
1028{
1029        if (WARN_ON(intsize < 1))
1030                return -EINVAL;
1031        *out_hwirq = intspec[0];
1032        *out_type = IRQ_TYPE_NONE;
1033        return 0;
1034}
1035EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
1036
1037/**
1038 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
1039 *
1040 * Device Tree IRQ specifier translation function which works with two cell
1041 * bindings where the cell values map directly to the hwirq number
1042 * and linux irq flags.
1043 */
1044int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
1045                        const u32 *intspec, unsigned int intsize,
1046                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
1047{
1048        if (WARN_ON(intsize < 2))
1049                return -EINVAL;
1050        *out_hwirq = intspec[0];
1051        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
1052        return 0;
1053}
1054EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
1055
1056/**
1057 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
1058 *
1059 * Device Tree IRQ specifier translation function which works with either one
1060 * or two cell bindings where the cell values map directly to the hwirq number
1061 * and linux irq flags.
1062 *
1063 * Note: don't use this function unless your interrupt controller explicitly
1064 * supports both one and two cell bindings.  For the majority of controllers
1065 * the _onecell() or _twocell() variants above should be used.
1066 */
1067int irq_domain_xlate_onetwocell(struct irq_domain *d,
1068                                struct device_node *ctrlr,
1069                                const u32 *intspec, unsigned int intsize,
1070                                unsigned long *out_hwirq, unsigned int *out_type)
1071{
1072        if (WARN_ON(intsize < 1))
1073                return -EINVAL;
1074        *out_hwirq = intspec[0];
1075        if (intsize > 1)
1076                *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
1077        else
1078                *out_type = IRQ_TYPE_NONE;
1079        return 0;
1080}
1081EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
1082
1083const struct irq_domain_ops irq_domain_simple_ops = {
1084        .xlate = irq_domain_xlate_onetwocell,
1085};
1086EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
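
/*
 * Ops sketch: a controller whose DT binding uses two cells (hwirq plus
 * trigger flags) can plug in the generic translation helper directly.
 * demo_twocell_map() is a hypothetical .map callback.
 */
static int demo_twocell_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hwirq);

static const struct irq_domain_ops demo_twocell_ops = {
        .map    = demo_twocell_map,
        .xlate  = irq_domain_xlate_twocell,
};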
1087
1088int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
1089                           int node, const struct cpumask *affinity)
1090{
1091        unsigned int hint;
1092
1093        if (virq >= 0) {
1094                virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
1095                                         affinity);
1096        } else {
1097                hint = hwirq % nr_irqs;
1098                if (hint == 0)
1099                        hint++;
1100                virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
1101                                         affinity);
1102                if (virq <= 0 && hint > 1) {
1103                        virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
1104                                                 affinity);
1105                }
1106        }
1107
1108        return virq;
1109}
1110
1111#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1112/**
 1113 * irq_domain_create_hierarchy - Add an irqdomain into the hierarchy
1114 * @parent:     Parent irq domain to associate with the new domain
1115 * @flags:      Irq domain flags associated to the domain
1116 * @size:       Size of the domain. See below
1117 * @fwnode:     Optional fwnode of the interrupt controller
1118 * @ops:        Pointer to the interrupt domain callbacks
1119 * @host_data:  Controller private data pointer
1120 *
1121 * If @size is 0 a tree domain is created, otherwise a linear domain.
1122 *
1123 * If successful the parent is associated to the new domain and the
1124 * domain flags are set.
1125 * Returns pointer to IRQ domain, or NULL on failure.
1126 */
1127struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
1128                                            unsigned int flags,
1129                                            unsigned int size,
1130                                            struct fwnode_handle *fwnode,
1131                                            const struct irq_domain_ops *ops,
1132                                            void *host_data)
1133{
1134        struct irq_domain *domain;
1135
1136        if (size)
1137                domain = irq_domain_create_linear(fwnode, size, ops, host_data);
1138        else
1139                domain = irq_domain_create_tree(fwnode, ops, host_data);
1140        if (domain) {
1141                domain->parent = parent;
1142                domain->flags |= flags;
1143        }
1144
1145        return domain;
1146}
1147EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
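
/*
 * Hierarchy sketch: a child domain's .alloc() typically decodes its own
 * hwirq from the fwspec, lets the parent allocate the underlying
 * resources, then installs its chip per interrupt.  Passing @arg through
 * unchanged, demo_hier_chip and the one-cell decoding are simplifying,
 * illustrative assumptions.
 */
static struct irq_chip demo_hier_chip;

static int demo_hier_alloc(struct irq_domain *domain, unsigned int virq,
                           unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq = fwspec->param[0];
        unsigned int i;
        int ret;

        ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++)
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &demo_hier_chip, NULL);
        return 0;
}

static const struct irq_domain_ops demo_hier_ops = {
        .alloc  = demo_hier_alloc,
        .free   = irq_domain_free_irqs_common,
};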
1148
1149static void irq_domain_insert_irq(int virq)
1150{
1151        struct irq_data *data;
1152
1153        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1154                struct irq_domain *domain = data->domain;
1155
1156                domain->mapcount++;
1157                irq_domain_set_mapping(domain, data->hwirq, data);
1158
1159                /* If not already assigned, give the domain the chip's name */
1160                if (!domain->name && data->chip)
1161                        domain->name = data->chip->name;
1162        }
1163
1164        irq_clear_status_flags(virq, IRQ_NOREQUEST);
1165}
1166
1167static void irq_domain_remove_irq(int virq)
1168{
1169        struct irq_data *data;
1170
1171        irq_set_status_flags(virq, IRQ_NOREQUEST);
1172        irq_set_chip_and_handler(virq, NULL, NULL);
1173        synchronize_irq(virq);
1174        smp_mb();
1175
1176        for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
1177                struct irq_domain *domain = data->domain;
1178                irq_hw_number_t hwirq = data->hwirq;
1179
1180                domain->mapcount--;
1181                irq_domain_clear_mapping(domain, hwirq);
1182        }
1183}
1184
1185static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
1186                                                   struct irq_data *child)
1187{
1188        struct irq_data *irq_data;
1189
1190        irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
1191                                irq_data_get_node(child));
1192        if (irq_data) {
1193                child->parent_data = irq_data;
1194                irq_data->irq = child->irq;
1195                irq_data->common = child->common;
1196                irq_data->domain = domain;
1197        }
1198
1199        return irq_data;
1200}
1201
1202static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
1203{
1204        struct irq_data *irq_data, *tmp;
1205        int i;
1206
1207        for (i = 0; i < nr_irqs; i++) {
1208                irq_data = irq_get_irq_data(virq + i);
1209                tmp = irq_data->parent_data;
1210                irq_data->parent_data = NULL;
1211                irq_data->domain = NULL;
1212
1213                while (tmp) {
1214                        irq_data = tmp;
1215                        tmp = tmp->parent_data;
1216                        kfree(irq_data);
1217                }
1218        }
1219}
1220
1221static int irq_domain_alloc_irq_data(struct irq_domain *domain,
1222                                     unsigned int virq, unsigned int nr_irqs)
1223{
1224        struct irq_data *irq_data;
1225        struct irq_domain *parent;
1226        int i;
1227
1228        /* The outermost irq_data is embedded in struct irq_desc */
1229        for (i = 0; i < nr_irqs; i++) {
1230                irq_data = irq_get_irq_data(virq + i);
1231                irq_data->domain = domain;
1232
1233                for (parent = domain->parent; parent; parent = parent->parent) {
1234                        irq_data = irq_domain_insert_irq_data(parent, irq_data);
1235                        if (!irq_data) {
1236                                irq_domain_free_irq_data(virq, i + 1);
1237                                return -ENOMEM;
1238                        }
1239                }
1240        }
1241
1242        return 0;
1243}
1244
1245/**
1246 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1247 * @domain:     domain to match
1248 * @virq:       IRQ number to get irq_data
1249 */
1250struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1251                                         unsigned int virq)
1252{
1253        struct irq_data *irq_data;
1254
1255        for (irq_data = irq_get_irq_data(virq); irq_data;
1256             irq_data = irq_data->parent_data)
1257                if (irq_data->domain == domain)
1258                        return irq_data;
1259
1260        return NULL;
1261}
1262EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1263
1264/**
1265 * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
1266 * @domain:     Interrupt domain to match
1267 * @virq:       IRQ number
1268 * @hwirq:      The hwirq number
1269 * @chip:       The associated interrupt chip
1270 * @chip_data:  The associated chip data
1271 */
1272int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
1273                                  irq_hw_number_t hwirq, struct irq_chip *chip,
1274                                  void *chip_data)
1275{
1276        struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
1277
1278        if (!irq_data)
1279                return -ENOENT;
1280
1281        irq_data->hwirq = hwirq;
1282        irq_data->chip = chip ? chip : &no_irq_chip;
1283        irq_data->chip_data = chip_data;
1284
1285        return 0;
1286}
1287EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
1288
1289/**
1290 * irq_domain_set_info - Set the complete data for a @virq in @domain
1291 * @domain:             Interrupt domain to match
1292 * @virq:               IRQ number
1293 * @hwirq:              The hardware interrupt number
1294 * @chip:               The associated interrupt chip
1295 * @chip_data:          The associated interrupt chip data
1296 * @handler:            The interrupt flow handler
1297 * @handler_data:       The interrupt flow handler data
1298 * @handler_name:       The interrupt handler name
1299 */
1300void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1301                         irq_hw_number_t hwirq, struct irq_chip *chip,
1302                         void *chip_data, irq_flow_handler_t handler,
1303                         void *handler_data, const char *handler_name)
1304{
1305        irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
1306        __irq_set_handler(virq, handler, 0, handler_name);
1307        irq_set_handler_data(virq, handler_data);
1308}
1309EXPORT_SYMBOL(irq_domain_set_info);
1310
1311/**
1312 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
1313 * @irq_data:   The pointer to irq_data
1314 */
1315void irq_domain_reset_irq_data(struct irq_data *irq_data)
1316{
1317        irq_data->hwirq = 0;
1318        irq_data->chip = &no_irq_chip;
1319        irq_data->chip_data = NULL;
1320}
1321EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
1322
1323/**
1324 * irq_domain_free_irqs_common - Clear irq_data and free the parent
1325 * @domain:     Interrupt domain to match
1326 * @virq:       IRQ number to start with
1327 * @nr_irqs:    The number of irqs to free
1328 */
1329void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
1330                                 unsigned int nr_irqs)
1331{
1332        struct irq_data *irq_data;
1333        int i;
1334
1335        for (i = 0; i < nr_irqs; i++) {
1336                irq_data = irq_domain_get_irq_data(domain, virq + i);
1337                if (irq_data)
1338                        irq_domain_reset_irq_data(irq_data);
1339        }
1340        irq_domain_free_irqs_parent(domain, virq, nr_irqs);
1341}
1342EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
1343
1344/**
1345 * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
1346 * @domain:     Interrupt domain to match
1347 * @virq:       IRQ number to start with
1348 * @nr_irqs:    The number of irqs to free
1349 */
1350void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
1351                              unsigned int nr_irqs)
1352{
1353        int i;
1354
1355        for (i = 0; i < nr_irqs; i++) {
1356                irq_set_handler_data(virq + i, NULL);
1357                irq_set_handler(virq + i, NULL);
1358        }
1359        irq_domain_free_irqs_common(domain, virq, nr_irqs);
1360}
1361
1362static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
1363                                           unsigned int irq_base,
1364                                           unsigned int nr_irqs)
1365{
1366        if (domain->ops->free)
1367                domain->ops->free(domain, irq_base, nr_irqs);
1368}
1369
1370int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
1371                                    unsigned int irq_base,
1372                                    unsigned int nr_irqs, void *arg)
1373{
1374        return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
1375}
1376
1377/**
1378 * __irq_domain_alloc_irqs - Allocate IRQs from domain
1379 * @domain:     domain to allocate from
 1380 * @irq_base:   allocate specified IRQ number if irq_base >= 0
1381 * @nr_irqs:    number of IRQs to allocate
1382 * @node:       NUMA node id for memory allocation
1383 * @arg:        domain specific argument
1384 * @realloc:    IRQ descriptors have already been allocated if true
1385 * @affinity:   Optional irq affinity mask for multiqueue devices
1386 *
 1387 * Allocate IRQ numbers and initialize all data structures to support
1388 * hierarchy IRQ domains.
1389 * Parameter @realloc is mainly to support legacy IRQs.
1390 * Returns error code or allocated IRQ number
1391 *
1392 * The whole process to setup an IRQ has been split into two steps.
1393 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
1394 * descriptor and required hardware resources. The second step,
 1395 * irq_domain_activate_irq(), is to program the hardware with preallocated
1396 * resources. In this way, it's easier to rollback when failing to
1397 * allocate resources.
1398 */
1399int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
1400                            unsigned int nr_irqs, int node, void *arg,
1401                            bool realloc, const struct cpumask *affinity)
1402{
1403        int i, ret, virq;
1404
1405        if (domain == NULL) {
1406                domain = irq_default_domain;
1407                if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
1408                        return -EINVAL;
1409        }
1410
1411        if (!domain->ops->alloc) {
1412                pr_debug("domain->ops->alloc() is NULL\n");
1413                return -ENOSYS;
1414        }
1415
1416        if (realloc && irq_base >= 0) {
1417                virq = irq_base;
1418        } else {
1419                virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
1420                                              affinity);
1421                if (virq < 0) {
1422                        pr_debug("cannot allocate IRQ(base %d, count %d)\n",
1423                                 irq_base, nr_irqs);
1424                        return virq;
1425                }
1426        }
1427
1428        if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
1429                pr_debug("cannot allocate memory for IRQ%d\n", virq);
1430                ret = -ENOMEM;
1431                goto out_free_desc;
1432        }
1433
1434        mutex_lock(&irq_domain_mutex);
1435        ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
1436        if (ret < 0) {
1437                mutex_unlock(&irq_domain_mutex);
1438                goto out_free_irq_data;
1439        }
1440        for (i = 0; i < nr_irqs; i++)
1441                irq_domain_insert_irq(virq + i);
1442        mutex_unlock(&irq_domain_mutex);
1443
1444        return virq;
1445
1446out_free_irq_data:
1447        irq_domain_free_irq_data(virq, nr_irqs);
1448out_free_desc:
1449        irq_free_descs(virq, nr_irqs);
1450        return ret;
1451}
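
/*
 * Allocation sketch: hierarchy-aware callers normally use the
 * irq_domain_alloc_irqs() wrapper from <linux/irqdomain.h> and later
 * release with irq_domain_free_irqs().  The single-irq allocation and
 * fwspec argument below are illustrative.
 */
static int demo_hierarchy_alloc_one(struct irq_domain *domain,
                                    struct irq_fwspec *fwspec)
{
        int virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);

        if (virq <= 0)
                return virq ? virq : -ENOSPC;

        /* Step two, irq_domain_activate_irq(), programs the hardware later */
        return virq;
}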
1452
1453/* The irq_data was moved, fix the revmap to refer to the new location */
1454static void irq_domain_fix_revmap(struct irq_data *d)
1455{
1456        void __rcu **slot;
1457
1458        if (d->hwirq < d->domain->revmap_size)
1459                return; /* Not using radix tree. */
1460
1461        /* Fix up the revmap. */
1462        mutex_lock(&revmap_trees_mutex);
1463        slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
1464        if (slot)
1465                radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
1466        mutex_unlock(&revmap_trees_mutex);
1467}
1468
1469/**
1470 * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
1471 * @domain:     Domain to push.
1472 * @virq:       Irq to push the domain into.
1473 * @arg:        Passed to the irq_domain_ops alloc() function.
1474 *
1475 * For an already existing irqdomain hierarchy, as might be obtained
1476 * via a call to pci_enable_msix(), add an additional domain to the
1477 * head of the processing chain.  Must be called before request_irq()
1478 * has been called.
1479 */
1480int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
1481{
1482        struct irq_data *child_irq_data;
1483        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1484        struct irq_desc *desc;
1485        int rv = 0;
1486
1487        /*
1488         * Check that no action has been set, which indicates the virq
1489         * is in a state where this function doesn't have to deal with
1490         * races between interrupt handling and maintaining the
1491         * hierarchy.  This will catch gross misuse.  Attempting to
1492         * make the check race free would require holding locks across
1493         * calls to struct irq_domain_ops->alloc(), which could lead
1494         * to deadlock, so we just do a simple check before starting.
1495         */
1496        desc = irq_to_desc(virq);
1497        if (!desc)
1498                return -EINVAL;
1499        if (WARN_ON(desc->action))
1500                return -EBUSY;
1501
1502        if (domain == NULL)
1503                return -EINVAL;
1504
1505        if (WARN_ON(!irq_domain_is_hierarchy(domain)))
1506                return -EINVAL;
1507
1508        if (!root_irq_data)
1509                return -EINVAL;
1510
1511        if (domain->parent != root_irq_data->domain)
1512                return -EINVAL;
1513
1514        child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
1515                                      irq_data_get_node(root_irq_data));
1516        if (!child_irq_data)
1517                return -ENOMEM;
1518
1519        mutex_lock(&irq_domain_mutex);
1520
1521        /* Copy the original irq_data. */
1522        *child_irq_data = *root_irq_data;
1523
1524        /*
1525         * Overwrite the root_irq_data, which is embedded in struct
1526         * irq_desc, with values for this domain.
1527         */
1528        root_irq_data->parent_data = child_irq_data;
1529        root_irq_data->domain = domain;
1530        root_irq_data->mask = 0;
1531        root_irq_data->hwirq = 0;
1532        root_irq_data->chip = NULL;
1533        root_irq_data->chip_data = NULL;
1534
1535        /* May (probably does) set hwirq, chip, etc. */
1536        rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
1537        if (rv) {
1538                /* Restore the original irq_data. */
1539                *root_irq_data = *child_irq_data;
1540                goto error;
1541        }
1542
1543        irq_domain_fix_revmap(child_irq_data);
1544        irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
1545
1546error:
1547        mutex_unlock(&irq_domain_mutex);
1548
1549        return rv;
1550}
1551EXPORT_SYMBOL_GPL(irq_domain_push_irq);
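/*
 * Illustrative sketch (not part of this file): pushing an extra domain on
 * top of MSI-X vectors handed out by the PCI core, before any handler is
 * requested.  "my_domain" and "my_alloc_info" are hypothetical driver names.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX);
 *	for (i = 0; i < nvec; i++) {
 *		virq = pci_irq_vector(pdev, i);
 *		ret = irq_domain_push_irq(my_domain, virq, &my_alloc_info);
 *		if (ret)
 *			goto err_pop;
 *	}
 *
 * Only after every vector has been pushed does the driver call request_irq()
 * on them as usual.
 */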
1552
1553/**
1554 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
1555 * @domain:     Domain to remove.
1556 * @virq:       Irq to remove the domain from.
1557 *
1558 * Undo the effects of a call to irq_domain_push_irq().  Must be
1559 * called either before request_irq() or after free_irq().
1560 */
1561int irq_domain_pop_irq(struct irq_domain *domain, int virq)
1562{
1563        struct irq_data *root_irq_data = irq_get_irq_data(virq);
1564        struct irq_data *child_irq_data;
1565        struct irq_data *tmp_irq_data;
1566        struct irq_desc *desc;
1567
1568        /*
1569         * Check that no action is set, which indicates the virq is in
1570         * a state where this function doesn't have to deal with races
1571         * between interrupt handling and maintaining the hierarchy.
1572         * This will catch gross misuse.  Attempting to make the check
1573         * race free would require holding locks across calls to
1574         * struct irq_domain_ops->free(), which could lead to
1575         * deadlock, so we just do a simple check before starting.
1576         */
1577        desc = irq_to_desc(virq);
1578        if (!desc)
1579                return -EINVAL;
1580        if (WARN_ON(desc->action))
1581                return -EBUSY;
1582
1583        if (domain == NULL)
1584                return -EINVAL;
1585
1586        if (!root_irq_data)
1587                return -EINVAL;
1588
1589        tmp_irq_data = irq_domain_get_irq_data(domain, virq);
1590
1591        /* We can only "pop" if this domain is at the top of the list */
1592        if (WARN_ON(root_irq_data != tmp_irq_data))
1593                return -EINVAL;
1594
1595        if (WARN_ON(root_irq_data->domain != domain))
1596                return -EINVAL;
1597
1598        child_irq_data = root_irq_data->parent_data;
1599        if (WARN_ON(!child_irq_data))
1600                return -EINVAL;
1601
1602        mutex_lock(&irq_domain_mutex);
1603
1604        root_irq_data->parent_data = NULL;
1605
1606        irq_domain_clear_mapping(domain, root_irq_data->hwirq);
1607        irq_domain_free_irqs_hierarchy(domain, virq, 1);
1608
1609        /* Restore the original irq_data. */
1610        *root_irq_data = *child_irq_data;
1611
1612        irq_domain_fix_revmap(root_irq_data);
1613
1614        mutex_unlock(&irq_domain_mutex);
1615
1616        kfree(child_irq_data);
1617
1618        return 0;
1619}
1620EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
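/*
 * Teardown mirrors irq_domain_push_irq(): a hypothetical driver releases the
 * handler first and only then pops its domain off the hierarchy.
 *
 *	free_irq(virq, mydev);
 *	irq_domain_pop_irq(my_domain, virq);
 */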
1621
1622/**
1623 * irq_domain_free_irqs - Free IRQ numbers and associated data structures
1624 * @virq:       base IRQ number
1625 * @nr_irqs:    number of IRQs to free
1626 */
1627void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
1628{
1629        struct irq_data *data = irq_get_irq_data(virq);
1630        int i;
1631
1632        if (WARN(!data || !data->domain || !data->domain->ops->free,
1633                 "NULL pointer, cannot free irq\n"))
1634                return;
1635
1636        mutex_lock(&irq_domain_mutex);
1637        for (i = 0; i < nr_irqs; i++)
1638                irq_domain_remove_irq(virq + i);
1639        irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
1640        mutex_unlock(&irq_domain_mutex);
1641
1642        irq_domain_free_irq_data(virq, nr_irqs);
1643        irq_free_descs(virq, nr_irqs);
1644}
1645
1646/**
1647 * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
1648 * @irq_base:   Base IRQ number
1649 * @nr_irqs:    Number of IRQs to allocate
1650 * @arg:        Allocation data (arch/domain specific)
1651 *
1652 * Check whether the domain has a parent domain. If so, allocate the
1653 * interrupts through the parent domain; otherwise return -ENOSYS.
1654 */
1655int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
1656                                 unsigned int irq_base, unsigned int nr_irqs,
1657                                 void *arg)
1658{
1659        if (!domain->parent)
1660                return -ENOSYS;
1661
1662        return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
1663                                               nr_irqs, arg);
1664}
1665EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
1666
1667/**
1668 * irq_domain_free_irqs_parent - Free interrupts from parent domain
1669 * @irq_base:   Base IRQ number
1670 * @nr_irqs:    Number of IRQs to free
1671 *
1672 * Check whether the domain has a parent domain. If so, free the
1673 * interrupts through the parent domain; otherwise do nothing.
1674 */
1675void irq_domain_free_irqs_parent(struct irq_domain *domain,
1676                                 unsigned int irq_base, unsigned int nr_irqs)
1677{
1678        if (!domain->parent)
1679                return;
1680
1681        irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
1682}
1683EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
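/*
 * Illustrative sketch (not part of this file): a stacked domain's ->alloc()
 * typically asks the parent domain for resources first and then fills in its
 * own per-interrupt data, while ->free() delegates back through the common
 * helpers.  "my_chip" and "my_translate_fwspec()" are hypothetical.
 *
 *	static int my_domain_alloc(struct irq_domain *domain, unsigned int virq,
 *				   unsigned int nr_irqs, void *arg)
 *	{
 *		irq_hw_number_t hwirq = my_translate_fwspec(arg);
 *		int i, ret;
 *
 *		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
 *		if (ret)
 *			return ret;
 *
 *		for (i = 0; i < nr_irqs; i++)
 *			irq_domain_set_hwirq_and_chip(domain, virq + i,
 *						      hwirq + i, &my_chip, NULL);
 *		return 0;
 *	}
 *
 *	static void my_domain_free(struct irq_domain *domain, unsigned int virq,
 *				   unsigned int nr_irqs)
 *	{
 *		irq_domain_free_irqs_common(domain, virq, nr_irqs);
 *	}
 */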
1684
1685static void __irq_domain_activate_irq(struct irq_data *irq_data)
1686{
1687        if (irq_data && irq_data->domain) {
1688                struct irq_domain *domain = irq_data->domain;
1689
1690                if (irq_data->parent_data)
1691                        __irq_domain_activate_irq(irq_data->parent_data);
1692                if (domain->ops->activate)
1693                        domain->ops->activate(domain, irq_data);
1694        }
1695}
1696
1697static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
1698{
1699        if (irq_data && irq_data->domain) {
1700                struct irq_domain *domain = irq_data->domain;
1701
1702                if (domain->ops->deactivate)
1703                        domain->ops->deactivate(domain, irq_data);
1704                if (irq_data->parent_data)
1705                        __irq_domain_deactivate_irq(irq_data->parent_data);
1706        }
1707}
1708
1709/**
1710 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
1711 *                           interrupt
1712 * @irq_data:   outermost irq_data associated with interrupt
1713 *
1714 * This is the second step to call domain_ops->activate to program the
1715 * interrupt controllers, so the interrupt can actually get delivered.
1716 */
1717void irq_domain_activate_irq(struct irq_data *irq_data)
1718{
1719        if (!irqd_is_activated(irq_data)) {
1720                __irq_domain_activate_irq(irq_data);
1721                irqd_set_activated(irq_data);
1722        }
1723}
1724
1725/**
1726 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
1727 *                             deactivate interrupt
1728 * @irq_data: outermost irq_data associated with interrupt
1729 *
1730 * It calls domain_ops->deactivate to program interrupt controllers to disable
1731 * interrupt delivery.
1732 */
1733void irq_domain_deactivate_irq(struct irq_data *irq_data)
1734{
1735        if (irqd_is_activated(irq_data)) {
1736                __irq_domain_deactivate_irq(irq_data);
1737                irqd_clr_activated(irq_data);
1738        }
1739}
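/*
 * Illustrative sketch (not part of this file): the per-domain ->activate()
 * and ->deactivate() callbacks invoked by the recursion above only have to
 * program the hardware for the hwirq reserved during allocation.
 * "my_hw_route_enable()" and "my_hw_route_disable()" are hypothetical.
 *
 *	static void my_domain_activate(struct irq_domain *d, struct irq_data *irqd)
 *	{
 *		my_hw_route_enable(irqd->hwirq);
 *	}
 *
 *	static void my_domain_deactivate(struct irq_domain *d, struct irq_data *irqd)
 *	{
 *		my_hw_route_disable(irqd->hwirq);
 *	}
 */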
1740
1741static void irq_domain_check_hierarchy(struct irq_domain *domain)
1742{
1743        /* Hierarchical irq_domains must implement the alloc() callback */
1744        if (domain->ops->alloc)
1745                domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
1746}
1747
1748/**
1749 * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
1750 * parent has MSI remapping support
1751 * @domain: domain pointer
1752 */
1753bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
1754{
1755        for (; domain; domain = domain->parent) {
1756                if (irq_domain_is_msi_remap(domain))
1757                        return true;
1758        }
1759        return false;
1760}
1761#else   /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1762/**
1763 * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
1764 * @domain:     domain to match
1765 * @virq:       IRQ number to get irq_data
1766 */
1767struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
1768                                         unsigned int virq)
1769{
1770        struct irq_data *irq_data = irq_get_irq_data(virq);
1771
1772        return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
1773}
1774EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
1775
1776/**
1777 * irq_domain_set_info - Set the complete data for a @virq in @domain
1778 * @domain:             Interrupt domain to match
1779 * @virq:               IRQ number
1780 * @hwirq:              The hardware interrupt number
1781 * @chip:               The associated interrupt chip
1782 * @chip_data:          The associated interrupt chip data
1783 * @handler:            The interrupt flow handler
1784 * @handler_data:       The interrupt flow handler data
1785 * @handler_name:       The interrupt handler name
1786 */
1787void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
1788                         irq_hw_number_t hwirq, struct irq_chip *chip,
1789                         void *chip_data, irq_flow_handler_t handler,
1790                         void *handler_data, const char *handler_name)
1791{
1792        irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
1793        irq_set_chip_data(virq, chip_data);
1794        irq_set_handler_data(virq, handler_data);
1795}
1796
1797static void irq_domain_check_hierarchy(struct irq_domain *domain)
1798{
1799}
1800#endif  /* CONFIG_IRQ_DOMAIN_HIERARCHY */
1801
1802#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
1803static struct dentry *domain_dir;
1804
1805static void
1806irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
1807{
1808        seq_printf(m, "%*sname:   %s\n", ind, "", d->name);
1809        seq_printf(m, "%*ssize:   %u\n", ind + 1, "",
1810                   d->revmap_size + d->revmap_direct_max_irq);
1811        seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
1812        seq_printf(m, "%*sflags:  0x%08x\n", ind + 1, "", d->flags);
1813#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1814        if (!d->parent)
1815                return;
1816        seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
1817        irq_domain_debug_show_one(m, d->parent, ind + 4);
1818#endif
1819}
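/*
 * With the format strings above, a domain entry under the debugfs "domains"
 * directory reads roughly as follows (names and values are illustrative;
 * parent domains are indented by the recursion):
 *
 *	name:   my-msi-domain
 *	 size:   0
 *	 mapped: 4
 *	 flags:  0x00000103
 *	 parent: my-parent-domain
 *	    name:   my-parent-domain
 *	    ...
 */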
1820
1821static int irq_domain_debug_show(struct seq_file *m, void *p)
1822{
1823        struct irq_domain *d = m->private;
1824
1825        /* Default domain? Might be NULL */
1826        if (!d) {
1827                if (!irq_default_domain)
1828                        return 0;
1829                d = irq_default_domain;
1830        }
1831        irq_domain_debug_show_one(m, d, 0);
1832        return 0;
1833}
1834
1835static int irq_domain_debug_open(struct inode *inode, struct file *file)
1836{
1837        return single_open(file, irq_domain_debug_show, inode->i_private);
1838}
1839
1840static const struct file_operations dfs_domain_ops = {
1841        .open           = irq_domain_debug_open,
1842        .read           = seq_read,
1843        .llseek         = seq_lseek,
1844        .release        = single_release,
1845};
1846
1847static void debugfs_add_domain_dir(struct irq_domain *d)
1848{
1849        if (!d->name || !domain_dir || d->debugfs_file)
1850                return;
1851        d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
1852                                              &dfs_domain_ops);
1853}
1854
1855static void debugfs_remove_domain_dir(struct irq_domain *d)
1856{
1857        debugfs_remove(d->debugfs_file);
1858}
1859
1860void __init irq_domain_debugfs_init(struct dentry *root)
1861{
1862        struct irq_domain *d;
1863
1864        domain_dir = debugfs_create_dir("domains", root);
1865        if (!domain_dir)
1866                return;
1867
1868        debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
1869        mutex_lock(&irq_domain_mutex);
1870        list_for_each_entry(d, &irq_domain_list, link)
1871                debugfs_add_domain_dir(d);
1872        mutex_unlock(&irq_domain_mutex);
1873}
1874#endif
1875