/*
 * linux/kernel/irq/irqdomain.c
 *
 * IRQ domain support: translation between hardware interrupt numbers
 * and linux irq numbers.
 */
   1#define pr_fmt(fmt)  "irq: " fmt
   2
   3#include <linux/debugfs.h>
   4#include <linux/hardirq.h>
   5#include <linux/interrupt.h>
   6#include <linux/irq.h>
   7#include <linux/irqdesc.h>
   8#include <linux/irqdomain.h>
   9#include <linux/module.h>
  10#include <linux/mutex.h>
  11#include <linux/of.h>
  12#include <linux/of_address.h>
  13#include <linux/topology.h>
  14#include <linux/seq_file.h>
  15#include <linux/slab.h>
  16#include <linux/smp.h>
  17#include <linux/fs.h>
  18
#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
                                 * ie. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */

/* List of all registered irq domains; guarded by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes insert/delete on the radix-tree reverse maps
 * (lookups in irq_find_mapping() run under rcu_read_lock()). */
static DEFINE_MUTEX(revmap_trees_mutex);
/* Domain used when callers pass a NULL domain; set via irq_set_default_host(). */
static struct irq_domain *irq_default_domain;
  30
  31/**
  32 * irq_domain_alloc() - Allocate a new irq_domain data structure
  33 * @of_node: optional device-tree node of the interrupt controller
  34 * @revmap_type: type of reverse mapping to use
  35 * @ops: map/unmap domain callbacks
  36 * @host_data: Controller private data pointer
  37 *
  38 * Allocates and initialize and irq_domain structure.  Caller is expected to
  39 * register allocated irq_domain with irq_domain_register().  Returns pointer
  40 * to IRQ domain, or NULL on failure.
  41 */
  42static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
  43                                           unsigned int revmap_type,
  44                                           const struct irq_domain_ops *ops,
  45                                           void *host_data)
  46{
  47        struct irq_domain *domain;
  48
  49        domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
  50                              of_node_to_nid(of_node));
  51        if (WARN_ON(!domain))
  52                return NULL;
  53
  54        /* Fill structure */
  55        domain->revmap_type = revmap_type;
  56        domain->ops = ops;
  57        domain->host_data = host_data;
  58        domain->of_node = of_node_get(of_node);
  59
  60        return domain;
  61}
  62
  63static void irq_domain_free(struct irq_domain *domain)
  64{
  65        of_node_put(domain->of_node);
  66        kfree(domain);
  67}
  68
/* Publish a freshly allocated domain on the global domain list. */
static void irq_domain_add(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);
	pr_debug("Allocated domain of type %d @0x%p\n",
		 domain->revmap_type, domain);
}
  77
  78/**
  79 * irq_domain_remove() - Remove an irq domain.
  80 * @domain: domain to remove
  81 *
  82 * This routine is used to remove an irq domain. The caller must ensure
  83 * that all mappings within the domain have been disposed of prior to
  84 * use, depending on the revmap type.
  85 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);

	/* Per-type teardown of the reverse map before unlinking. */
	switch (domain->revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		/*
		 * Legacy domains don't manage their own irq_desc
		 * allocations, we expect the caller to handle irq_desc
		 * freeing on their own.
		 */
		break;
	case IRQ_DOMAIN_MAP_TREE:
		/* Caller must have disposed of all mappings already. */
		WARN_ON(!radix_tree_empty(&domain->revmap_data.tree));
		break;
	case IRQ_DOMAIN_MAP_LINEAR:
		kfree(domain->revmap_data.linear.revmap);
		domain->revmap_data.linear.size = 0;
		break;
	case IRQ_DOMAIN_MAP_NOMAP:
		break;
	}

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain of type %d @0x%p\n",
		 domain->revmap_type, domain);

	/* Frees the domain and drops its of_node reference. */
	irq_domain_free(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
 125
 126static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
 127                                             irq_hw_number_t hwirq)
 128{
 129        irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
 130        int size = domain->revmap_data.legacy.size;
 131
 132        if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
 133                return 0;
 134        return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
 135}
 136
 137/**
 138 * irq_domain_add_simple() - Allocate and register a simple irq_domain.
 139 * @of_node: pointer to interrupt controller's device tree node.
 140 * @size: total number of irqs in mapping
 141 * @first_irq: first number of irq block assigned to the domain,
 142 *      pass zero to assign irqs on-the-fly. This will result in a
 143 *      linear IRQ domain so it is important to use irq_create_mapping()
 144 *      for each used IRQ, especially when SPARSE_IRQ is enabled.
 145 * @ops: map/unmap domain callbacks
 146 * @host_data: Controller private data pointer
 147 *
 148 * Allocates a legacy irq_domain if irq_base is positive or a linear
 149 * domain otherwise. For the legacy domain, IRQ descriptors will also
 150 * be allocated.
 151 *
 152 * This is intended to implement the expected behaviour for most
 153 * interrupt controllers which is that a linear mapping should
 154 * normally be used unless the system requires a legacy mapping in
 155 * order to support supplying interrupt numbers during non-DT
 156 * registration of devices.
 157 */
 158struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
 159                                         unsigned int size,
 160                                         unsigned int first_irq,
 161                                         const struct irq_domain_ops *ops,
 162                                         void *host_data)
 163{
 164        if (first_irq > 0) {
 165                int irq_base;
 166
 167                if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
 168                        /*
 169                         * Set the descriptor allocator to search for a
 170                         * 1-to-1 mapping, such as irq_alloc_desc_at().
 171                         * Use of_node_to_nid() which is defined to
 172                         * numa_node_id() on platforms that have no custom
 173                         * implementation.
 174                         */
 175                        irq_base = irq_alloc_descs(first_irq, first_irq, size,
 176                                                   of_node_to_nid(of_node));
 177                        if (irq_base < 0) {
 178                                pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
 179                                        first_irq);
 180                                irq_base = first_irq;
 181                        }
 182                } else
 183                        irq_base = first_irq;
 184
 185                return irq_domain_add_legacy(of_node, size, irq_base, 0,
 186                                             ops, host_data);
 187        }
 188
 189        /* A linear domain is the default */
 190        return irq_domain_add_linear(of_node, size, ops, host_data);
 191}
 192EXPORT_SYMBOL_GPL(irq_domain_add_simple);
 193
 194/**
 195 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 196 * @of_node: pointer to interrupt controller's device tree node.
 197 * @size: total number of irqs in legacy mapping
 198 * @first_irq: first number of irq block assigned to the domain
 199 * @first_hwirq: first hwirq number to use for the translation. Should normally
 200 *               be '0', but a positive integer can be used if the effective
 201 *               hwirqs numbering does not begin at zero.
 202 * @ops: map/unmap domain callbacks
 203 * @host_data: Controller private data pointer
 204 *
 205 * Note: the map() callback will be called before this function returns
 206 * for all legacy interrupts except 0 (which is always the invalid irq for
 207 * a legacy controller).
 208 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
                                         unsigned int size,
                                         unsigned int first_irq,
                                         irq_hw_number_t first_hwirq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain;
        unsigned int i;

        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
        if (!domain)
                return NULL;

        /* Record the fixed irq/hwirq window used by irq_domain_legacy_revmap(). */
        domain->revmap_data.legacy.first_irq = first_irq;
        domain->revmap_data.legacy.first_hwirq = first_hwirq;
        domain->revmap_data.legacy.size = size;

        mutex_lock(&irq_domain_mutex);
        /* Verify that all the irqs are available */
        for (i = 0; i < size; i++) {
                int irq = first_irq + i;
                struct irq_data *irq_data = irq_get_irq_data(irq);

                /* Descriptor must exist and must not belong to another domain. */
                if (WARN_ON(!irq_data || irq_data->domain)) {
                        mutex_unlock(&irq_domain_mutex);
                        irq_domain_free(domain);
                        return NULL;
                }
        }

        /* Claim all of the irqs before registering a legacy domain */
        for (i = 0; i < size; i++) {
                struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
                irq_data->hwirq = first_hwirq + i;
                irq_data->domain = domain;
        }
        mutex_unlock(&irq_domain_mutex);

        /* Now that the range is claimed, tell the controller about each irq. */
        for (i = 0; i < size; i++) {
                int irq = first_irq + i;
                int hwirq = first_hwirq + i;

                /* IRQ0 gets ignored */
                if (!irq)
                        continue;

                /* Legacy flags are left to default at this point,
                 * one can then use irq_create_mapping() to
                 * explicitly change them
                 */
                if (ops->map)
                        ops->map(domain, irq, hwirq);

                /* Clear norequest flags */
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        }

        irq_domain_add(domain);
        return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
 271
 272/**
 273 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 274 * @of_node: pointer to interrupt controller's device tree node.
 275 * @size: Number of interrupts in the domain.
 276 * @ops: map/unmap domain callbacks
 277 * @host_data: Controller private data pointer
 278 */
 279struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
 280                                         unsigned int size,
 281                                         const struct irq_domain_ops *ops,
 282                                         void *host_data)
 283{
 284        struct irq_domain *domain;
 285        unsigned int *revmap;
 286
 287        revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
 288                              of_node_to_nid(of_node));
 289        if (WARN_ON(!revmap))
 290                return NULL;
 291
 292        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
 293        if (!domain) {
 294                kfree(revmap);
 295                return NULL;
 296        }
 297        domain->revmap_data.linear.size = size;
 298        domain->revmap_data.linear.revmap = revmap;
 299        irq_domain_add(domain);
 300        return domain;
 301}
 302EXPORT_SYMBOL_GPL(irq_domain_add_linear);
 303
 304struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
 305                                         unsigned int max_irq,
 306                                         const struct irq_domain_ops *ops,
 307                                         void *host_data)
 308{
 309        struct irq_domain *domain = irq_domain_alloc(of_node,
 310                                        IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
 311        if (domain) {
 312                domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
 313                irq_domain_add(domain);
 314        }
 315        return domain;
 316}
 317EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
 318
 319/**
 320 * irq_domain_add_tree()
 321 * @of_node: pointer to interrupt controller's device tree node.
 322 * @ops: map/unmap domain callbacks
 323 *
 324 * Note: The radix tree will be allocated later during boot automatically
 325 * (the reverse mapping will use the slow path until that happens).
 326 */
 327struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
 328                                         const struct irq_domain_ops *ops,
 329                                         void *host_data)
 330{
 331        struct irq_domain *domain = irq_domain_alloc(of_node,
 332                                        IRQ_DOMAIN_MAP_TREE, ops, host_data);
 333        if (domain) {
 334                INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
 335                irq_domain_add(domain);
 336        }
 337        return domain;
 338}
 339EXPORT_SYMBOL_GPL(irq_domain_add_tree);
 340
 341/**
 342 * irq_find_host() - Locates a domain for a given device node
 343 * @node: device-tree node of the interrupt controller
 344 */
 345struct irq_domain *irq_find_host(struct device_node *node)
 346{
 347        struct irq_domain *h, *found = NULL;
 348        int rc;
 349
 350        /* We might want to match the legacy controller last since
 351         * it might potentially be set to match all interrupts in
 352         * the absence of a device node. This isn't a problem so far
 353         * yet though...
 354         */
 355        mutex_lock(&irq_domain_mutex);
 356        list_for_each_entry(h, &irq_domain_list, link) {
 357                if (h->ops->match)
 358                        rc = h->ops->match(h, node);
 359                else
 360                        rc = (h->of_node != NULL) && (h->of_node == node);
 361
 362                if (rc) {
 363                        found = h;
 364                        break;
 365                }
 366        }
 367        mutex_unlock(&irq_domain_mutex);
 368        return found;
 369}
 370EXPORT_SYMBOL_GPL(irq_find_host);
 371
 372/**
 373 * irq_set_default_host() - Set a "default" irq domain
 374 * @domain: default domain pointer
 375 *
 376 * For convenience, it's possible to set a "default" domain that will be used
 377 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 378 * platforms that want to manipulate a few hard coded interrupt numbers that
 379 * aren't properly represented in the device-tree.
 380 */
 381void irq_set_default_host(struct irq_domain *domain)
 382{
 383        pr_debug("Default domain set to @0x%p\n", domain);
 384
 385        irq_default_domain = domain;
 386}
 387EXPORT_SYMBOL_GPL(irq_set_default_host);
 388
/* Tear down @count mappings starting at @irq_base: quiesce each irq,
 * invoke the controller's unmap op, detach the irq_data from the domain
 * and clear the reverse-map entry.  Statement order is significant. */
static void irq_domain_disassociate_many(struct irq_domain *domain,
                                         unsigned int irq_base, int count)
{
        /*
         * disassociate in reverse order;
         * not strictly necessary, but nice for unwinding
         */
        while (count--) {
                int irq = irq_base + count;
                struct irq_data *irq_data = irq_get_irq_data(irq);
                irq_hw_number_t hwirq;

                /* Skip irqs that were never associated with this domain. */
                if (WARN_ON(!irq_data || irq_data->domain != domain))
                        continue;

                /* Save hwirq now: it is cleared below but still needed for
                 * the reverse-map removal at the end. */
                hwirq = irq_data->hwirq;
                irq_set_status_flags(irq, IRQ_NOREQUEST);

                /* remove chip and handler */
                irq_set_chip_and_handler(irq, NULL, NULL);

                /* Make sure it's completed */
                synchronize_irq(irq);

                /* Tell the PIC about it */
                if (domain->ops->unmap)
                        domain->ops->unmap(domain, irq);
                /* Barrier between the unmap callback and clearing the
                 * irq_data fields below. */
                smp_mb();

                irq_data->domain = NULL;
                irq_data->hwirq = 0;

                /* Clear reverse map */
                switch(domain->revmap_type) {
                case IRQ_DOMAIN_MAP_LINEAR:
                        if (hwirq < domain->revmap_data.linear.size)
                                domain->revmap_data.linear.revmap[hwirq] = 0;
                        break;
                case IRQ_DOMAIN_MAP_TREE:
                        mutex_lock(&revmap_trees_mutex);
                        radix_tree_delete(&domain->revmap_data.tree, hwirq);
                        mutex_unlock(&revmap_trees_mutex);
                        break;
                }
        }
}
 435
 436int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
 437                              irq_hw_number_t hwirq_base, int count)
 438{
 439        unsigned int virq = irq_base;
 440        irq_hw_number_t hwirq = hwirq_base;
 441        int i, ret;
 442
 443        pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
 444                of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
 445
 446        for (i = 0; i < count; i++) {
 447                struct irq_data *irq_data = irq_get_irq_data(virq + i);
 448
 449                if (WARN(!irq_data, "error: irq_desc not allocated; "
 450                         "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
 451                        return -EINVAL;
 452                if (WARN(irq_data->domain, "error: irq_desc already associated; "
 453                         "irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
 454                        return -EINVAL;
 455        };
 456
 457        for (i = 0; i < count; i++, virq++, hwirq++) {
 458                struct irq_data *irq_data = irq_get_irq_data(virq);
 459
 460                irq_data->hwirq = hwirq;
 461                irq_data->domain = domain;
 462                if (domain->ops->map) {
 463                        ret = domain->ops->map(domain, virq, hwirq);
 464                        if (ret != 0) {
 465                                /*
 466                                 * If map() returns -EPERM, this interrupt is protected
 467                                 * by the firmware or some other service and shall not
 468                                 * be mapped. Don't bother telling the user about it.
 469                                 */
 470                                if (ret != -EPERM) {
 471                                        pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
 472                                               of_node_full_name(domain->of_node), hwirq, virq, ret);
 473                                }
 474                                irq_data->domain = NULL;
 475                                irq_data->hwirq = 0;
 476                                continue;
 477                        }
 478                }
 479
 480                switch (domain->revmap_type) {
 481                case IRQ_DOMAIN_MAP_LINEAR:
 482                        if (hwirq < domain->revmap_data.linear.size)
 483                                domain->revmap_data.linear.revmap[hwirq] = virq;
 484                        break;
 485                case IRQ_DOMAIN_MAP_TREE:
 486                        mutex_lock(&revmap_trees_mutex);
 487                        radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
 488                        mutex_unlock(&revmap_trees_mutex);
 489                        break;
 490                }
 491
 492                irq_clear_status_flags(virq, IRQ_NOREQUEST);
 493        }
 494
 495        return 0;
 496
 497        irq_domain_disassociate_many(domain, irq_base, i);
 498        return -EINVAL;
 499}
 500EXPORT_SYMBOL_GPL(irq_domain_associate_many);
 501
 502/**
 503 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 504 * @domain: domain to allocate the irq for or NULL for default domain
 505 *
 506 * This routine is used for irq controllers which can choose the hardware
 507 * interrupt numbers they generate. In such a case it's simplest to use
 508 * the linux irq as the hardware interrupt number.
 509 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
        unsigned int virq;

        if (domain == NULL)
                domain = irq_default_domain;

        /* Direct mapping only makes sense for NOMAP domains. */
        if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
                return 0;

        /* Allocate from 1 upwards: virq 0 is the invalid interrupt. */
        virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
        if (!virq) {
                pr_debug("create_direct virq allocation failed\n");
                return 0;
        }
        /* Enforce the controller's hwirq limit (hwirq == virq here). */
        if (virq >= domain->revmap_data.nomap.max_irq) {
                pr_err("ERROR: no free irqs available below %i maximum\n",
                        domain->revmap_data.nomap.max_irq);
                irq_free_desc(virq);
                return 0;
        }
        pr_debug("create_direct obtained virq %d\n", virq);

        /* Associate with hwirq == virq; free the descriptor on failure. */
        if (irq_domain_associate(domain, virq, virq)) {
                irq_free_desc(virq);
                return 0;
        }

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
 541
 542/**
 543 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 544 * @domain: domain owning this hardware interrupt or NULL for default domain
 545 * @hwirq: hardware irq number in that domain space
 546 *
 547 * Only one mapping per hardware interrupt is permitted. Returns a linux
 548 * irq number.
 549 * If the sense/trigger is to be specified, set_irq_type() should be called
 550 * on the number returned from that call.
 551 */
unsigned int irq_create_mapping(struct irq_domain *domain,
                                irq_hw_number_t hwirq)
{
        unsigned int hint;
        int virq;

        pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

        /* Look for default domain if nececssary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL) {
                pr_warning("irq_create_mapping called for"
                           " NULL domain, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return 0;
        }
        pr_debug("-> using domain @%p\n", domain);

        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
                pr_debug("-> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                /* Legacy domains have fixed translations; nothing to allocate. */
                return irq_domain_legacy_revmap(domain, hwirq);

        /* Allocate a virtual interrupt number */
        /* Hint at virq == hwirq (mod nr_irqs) but never 0, which is the
         * invalid irq; fall back to any free descriptor from 1 upwards. */
        hint = hwirq % nr_irqs;
        if (hint == 0)
                hint++;
        virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
        if (virq <= 0)
                virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
        if (virq <= 0) {
                pr_debug("-> virq allocation failed\n");
                return 0;
        }

        /* On association failure give the descriptor back. */
        if (irq_domain_associate(domain, virq, hwirq)) {
                irq_free_desc(virq);
                return 0;
        }

        pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
                hwirq, of_node_full_name(domain->of_node), virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
 605
 606/**
 607 * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
 608 * @domain: domain owning the interrupt range
 609 * @irq_base: beginning of linux IRQ range
 610 * @hwirq_base: beginning of hardware IRQ range
 611 * @count: Number of interrupts to map
 612 *
 613 * This routine is used for allocating and mapping a range of hardware
 614 * irqs to linux irqs where the linux irq numbers are at pre-defined
 615 * locations. For use by controllers that already have static mappings
 616 * to insert in to the domain.
 617 *
 618 * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
 619 * domain insertion.
 620 *
 621 * 0 is returned upon success, while any failure to establish a static
 622 * mapping is treated as an error.
 623 */
 624int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
 625                               irq_hw_number_t hwirq_base, int count)
 626{
 627        int ret;
 628
 629        ret = irq_alloc_descs(irq_base, irq_base, count,
 630                              of_node_to_nid(domain->of_node));
 631        if (unlikely(ret < 0))
 632                return ret;
 633
 634        ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
 635        if (unlikely(ret < 0)) {
 636                irq_free_descs(irq_base, count);
 637                return ret;
 638        }
 639
 640        return 0;
 641}
 642EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
 643
/* Create a linux irq from a device-tree interrupt specifier: resolve the
 * controller node to a domain, translate the specifier cells to a hwirq
 * and trigger type via the domain's xlate op, map it, and apply the type. */
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_domain *domain;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        domain = controller ? irq_find_host(controller) : irq_default_domain;
        if (!domain) {
#ifdef CONFIG_MIPS
                /*
                 * Workaround to avoid breaking interrupt controller drivers
                 * that don't yet register an irq_domain.  This is temporary
                 * code. ~~~gcl, Feb 24, 2012
                 *
                 * Scheduled for removal in Linux v3.6.  That should be enough
                 * time.
                 */
                if (intsize > 0)
                        return intspec[0];
#endif
                pr_warning("no irq domain found for %s !\n",
                           of_node_full_name(controller));
                return 0;
        }

        /* If domain has no translation, then we assume interrupt line */
        if (domain->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (domain->ops->xlate(domain, controller, intspec, intsize,
                                     &hwirq, &type))
                        return 0;
        }

        /* Create mapping */
        virq = irq_create_mapping(domain, hwirq);
        if (!virq)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
                irq_set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);
 692
 693/**
 694 * irq_dispose_mapping() - Unmap an interrupt
 695 * @virq: linux irq number of the interrupt to unmap
 696 */
 697void irq_dispose_mapping(unsigned int virq)
 698{
 699        struct irq_data *irq_data = irq_get_irq_data(virq);
 700        struct irq_domain *domain;
 701
 702        if (!virq || !irq_data)
 703                return;
 704
 705        domain = irq_data->domain;
 706        if (WARN_ON(domain == NULL))
 707                return;
 708
 709        /* Never unmap legacy interrupts */
 710        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
 711                return;
 712
 713        irq_domain_disassociate_many(domain, virq, 1);
 714        irq_free_desc(virq);
 715}
 716EXPORT_SYMBOL_GPL(irq_dispose_mapping);
 717
 718/**
 719 * irq_find_mapping() - Find a linux irq from an hw irq number.
 720 * @domain: domain owning this hardware interrupt
 721 * @hwirq: hardware irq number in that domain space
 722 */
unsigned int irq_find_mapping(struct irq_domain *domain,
                              irq_hw_number_t hwirq)
{
        struct irq_data *data;

        /* Look for default domain if nececssary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL)
                return 0;

        /* Reverse lookup strategy depends on the domain's revmap type. */
        switch (domain->revmap_type) {
        case IRQ_DOMAIN_MAP_LEGACY:
                /* Fixed offset translation. */
                return irq_domain_legacy_revmap(domain, hwirq);
        case IRQ_DOMAIN_MAP_LINEAR:
                /* Direct table lookup. */
                return irq_linear_revmap(domain, hwirq);
        case IRQ_DOMAIN_MAP_TREE:
                /* Radix-tree lookup; readers use RCU, writers hold
                 * revmap_trees_mutex. */
                rcu_read_lock();
                data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
                rcu_read_unlock();
                if (data)
                        return data->irq;
                break;
        case IRQ_DOMAIN_MAP_NOMAP:
                /* hwirq == virq by construction; verify it still belongs
                 * to this domain. */
                data = irq_get_irq_data(hwirq);
                if (data && (data->domain == domain) && (data->hwirq == hwirq))
                        return hwirq;
                break;
        }

        /* No mapping found. */
        return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
 756
 757/**
 758 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 759 * @domain: domain owning this hardware interrupt
 760 * @hwirq: hardware irq number in that domain space
 761 *
 762 * This is a fast path that can be called directly by irq controller code to
 763 * save a handful of instructions.
 764 */
 765unsigned int irq_linear_revmap(struct irq_domain *domain,
 766                               irq_hw_number_t hwirq)
 767{
 768        BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
 769
 770        /* Check revmap bounds; complain if exceeded */
 771        if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
 772                return 0;
 773
 774        return domain->revmap_data.linear.revmap[hwirq];
 775}
 776EXPORT_SYMBOL_GPL(irq_linear_revmap);
 777
 778#ifdef CONFIG_IRQ_DOMAIN_DEBUG
/* seq_file show callback: print one row per active irq with its hwirq,
 * chip name, chip data pointer and owning domain's DT node name. */
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        static const char none[] = "none";
        void *data;
        int i;

        /* Column header; chip data column is sized for a hex pointer. */
        seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
                      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
                      "domain name");

        /* Start at 1: irq 0 is the invalid interrupt. */
        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                /* Only show irqs that have a handler installed. */
                if (desc->action && desc->action->handler) {
                        struct irq_chip *chip;

                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);

                        chip = irq_desc_get_chip(desc);
                        if (chip && chip->name)
                                p = chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        data = irq_desc_get_chip_data(desc);
                        seq_printf(m, data ? "0x%p  " : "  %p  ", data);

                        if (desc->irq_data.domain)
                                p = of_node_full_name(desc->irq_data.domain->of_node);
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}
 827
/* Stateless dump: single_open() wires virq_debug_show() to the seq_file. */
static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}
 832
/* Read-only seq_file plumbing for the irq_domain_mapping debugfs file */
static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};
 839
 840static int __init irq_debugfs_init(void)
 841{
 842        if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
 843                                 NULL, &virq_debug_fops) == NULL)
 844                return -ENOMEM;
 845
 846        return 0;
 847}
 848__initcall(irq_debugfs_init);
 849#endif /* CONFIG_IRQ_DOMAIN_DEBUG */
 850
 851/**
 852 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 853 *
 854 * Device Tree IRQ specifier translation function which works with one cell
 855 * bindings where the cell value maps directly to the hwirq number.
 856 */
 857int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
 858                             const u32 *intspec, unsigned int intsize,
 859                             unsigned long *out_hwirq, unsigned int *out_type)
 860{
 861        if (WARN_ON(intsize < 1))
 862                return -EINVAL;
 863        *out_hwirq = intspec[0];
 864        *out_type = IRQ_TYPE_NONE;
 865        return 0;
 866}
 867EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
 868
 869/**
 870 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 871 *
 872 * Device Tree IRQ specifier translation function which works with two cell
 873 * bindings where the cell values map directly to the hwirq number
 874 * and linux irq flags.
 875 */
 876int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
 877                        const u32 *intspec, unsigned int intsize,
 878                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
 879{
 880        if (WARN_ON(intsize < 2))
 881                return -EINVAL;
 882        *out_hwirq = intspec[0];
 883        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
 884        return 0;
 885}
 886EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
 887
 888/**
 889 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 890 *
 891 * Device Tree IRQ specifier translation function which works with either one
 892 * or two cell bindings where the cell values map directly to the hwirq number
 893 * and linux irq flags.
 894 *
 895 * Note: don't use this function unless your interrupt controller explicitly
 896 * supports both one and two cell bindings.  For the majority of controllers
 897 * the _onecell() or _twocell() variants above should be used.
 898 */
 899int irq_domain_xlate_onetwocell(struct irq_domain *d,
 900                                struct device_node *ctrlr,
 901                                const u32 *intspec, unsigned int intsize,
 902                                unsigned long *out_hwirq, unsigned int *out_type)
 903{
 904        if (WARN_ON(intsize < 1))
 905                return -EINVAL;
 906        *out_hwirq = intspec[0];
 907        *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
 908        return 0;
 909}
 910EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
 911
/*
 * Fallback domain ops: no map/unmap callbacks, just a DT translation that
 * accepts either one- or two-cell interrupt specifiers.
 */
const struct irq_domain_ops irq_domain_simple_ops = {
        .xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
 916
 917#ifdef CONFIG_OF_IRQ
/**
 * irq_domain_generate_simple() - Register a legacy domain for a matching node
 * @match: of_device_id table used to locate the controller node
 * @phys_base: physical base address the controller node must map
 * @irq_start: first linux irq number of the legacy range
 *
 * Looks up a device-tree node matching @match at @phys_base and, if one is
 * found, registers a 32-interrupt legacy domain starting at linux irq
 * @irq_start (first hwirq 0) using irq_domain_simple_ops.  Silently does
 * nothing when no matching node exists.
 */
void irq_domain_generate_simple(const struct of_device_id *match,
                                u64 phys_base, unsigned int irq_start)
{
        struct device_node *node;
        pr_debug("looking for phys_base=%llx, irq_start=%i\n",
                (unsigned long long) phys_base, (int) irq_start);
        node = of_find_matching_node_by_address(NULL, match, phys_base);
        if (node)
                irq_domain_add_legacy(node, 32, irq_start, 0,
                                      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
 930#endif
 931