linux/kernel/irq/irqdomain.c
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/fs.h>

#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
                                 * ie. legacy 8259, gets irqs 1..15 */
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

static DEFINE_MUTEX(revmap_trees_mutex);
static struct irq_domain *irq_default_domain;

/**
 * irq_domain_alloc() - Allocate a new irq_domain data structure
 * @of_node: optional device-tree node of the interrupt controller
 * @revmap_type: type of reverse mapping to use
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.  The caller is expected
 * to register the allocated irq_domain with irq_domain_add().  Returns a
 * pointer to the IRQ domain, or NULL on failure.
 */
static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
                                           unsigned int revmap_type,
                                           const struct irq_domain_ops *ops,
                                           void *host_data)
{
        struct irq_domain *domain;

        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
        if (WARN_ON(!domain))
                return NULL;

        /* Fill structure */
        domain->revmap_type = revmap_type;
        domain->ops = ops;
        domain->host_data = host_data;
        domain->of_node = of_node_get(of_node);

        return domain;
}

static void irq_domain_add(struct irq_domain *domain)
{
        mutex_lock(&irq_domain_mutex);
        list_add(&domain->link, &irq_domain_list);
        mutex_unlock(&irq_domain_mutex);
        pr_debug("irq: Allocated domain of type %d @0x%p\n",
                 domain->revmap_type, domain);
}

static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
                                             irq_hw_number_t hwirq)
{
        irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
        int size = domain->revmap_data.legacy.size;

        if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
                return 0;
        return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}

/**
 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: total number of irqs in legacy mapping
 * @first_irq: first number of irq block assigned to the domain
 * @first_hwirq: first hwirq number to use for the translation. Should normally
 *               be '0', but a positive integer can be used if the effective
 *               hwirq numbering does not begin at zero.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: the map() callback will be called before this function returns
 * for all legacy interrupts except 0 (which is always the invalid irq for
 * a legacy controller).
 */
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
                                         unsigned int size,
                                         unsigned int first_irq,
                                         irq_hw_number_t first_hwirq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain;
        unsigned int i;

        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
        if (!domain)
                return NULL;

        domain->revmap_data.legacy.first_irq = first_irq;
        domain->revmap_data.legacy.first_hwirq = first_hwirq;
        domain->revmap_data.legacy.size = size;

        mutex_lock(&irq_domain_mutex);
        /* Verify that all the irqs are available */
        for (i = 0; i < size; i++) {
                int irq = first_irq + i;
                struct irq_data *irq_data = irq_get_irq_data(irq);

                if (WARN_ON(!irq_data || irq_data->domain)) {
                        mutex_unlock(&irq_domain_mutex);
                        of_node_put(domain->of_node);
                        kfree(domain);
                        return NULL;
                }
        }

        /* Claim all of the irqs before registering a legacy domain */
        for (i = 0; i < size; i++) {
                struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
                irq_data->hwirq = first_hwirq + i;
                irq_data->domain = domain;
        }
        mutex_unlock(&irq_domain_mutex);

        for (i = 0; i < size; i++) {
                int irq = first_irq + i;
                int hwirq = first_hwirq + i;

                /* IRQ0 gets ignored */
                if (!irq)
                        continue;

                /* Legacy flags are left to default at this point,
                 * one can then use irq_create_mapping() to
                 * explicitly change them
                 */
                ops->map(domain, irq, hwirq);

                /* Clear norequest flags */
                irq_clear_status_flags(irq, IRQ_NOREQUEST);
        }

        irq_domain_add(domain);
        return domain;
}
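
/*
 * Usage sketch (illustrative only; np, foo_pic_chip and the other foo_*
 * names are placeholders, not part of this file): a board with an
 * 8259-style PIC that already owns Linux irq descriptors 0..15 would
 * register its legacy domain like this:
 *
 *      static int foo_pic_map(struct irq_domain *d, unsigned int irq,
 *                             irq_hw_number_t hwirq)
 *      {
 *              irq_set_chip_and_handler(irq, &foo_pic_chip, handle_level_irq);
 *              return 0;
 *      }
 *
 *      static const struct irq_domain_ops foo_pic_ops = {
 *              .map = foo_pic_map,
 *              .xlate = irq_domain_xlate_onecell,
 *      };
 *
 *      domain = irq_domain_add_legacy(np, 16, 0, 0, &foo_pic_ops, NULL);
 *
 * Here hwirqs 0..15 translate directly to Linux irqs 0..15, and foo_pic_map()
 * is invoked for each of them (except irq 0) before the call returns.
 */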

/**
 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @size: number of interrupts in the linear map
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 */
struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
                                         unsigned int size,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain;
        unsigned int *revmap;

        revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
        if (WARN_ON(!revmap))
                return NULL;

        domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
        if (!domain) {
                kfree(revmap);
                return NULL;
        }
        domain->revmap_data.linear.size = size;
        domain->revmap_data.linear.revmap = revmap;
        irq_domain_add(domain);
        return domain;
}
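
/*
 * Usage sketch (hypothetical driver, illustrative only; np, foo_intc and
 * foo_intc_ops are placeholders): a memory-mapped interrupt controller with
 * 32 interrupt lines described in the device tree typically registers a
 * linear domain from its init/probe code and lets irq_create_of_mapping()
 * or irq_create_mapping() populate it on demand:
 *
 *      domain = irq_domain_add_linear(np, 32, &foo_intc_ops, foo_intc);
 *      if (!domain)
 *              return -ENOMEM;
 *
 * Once a mapping exists, the revmap array gives O(1) hwirq-to-virq lookups
 * via irq_linear_revmap() below.
 */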

/**
 * irq_domain_add_nomap() - Allocate and register a nomap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @max_irq: upper bound on the irq numbers handed out (0 means no practical
 *           limit)
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Used by controllers that can pick their own hardware interrupt numbers;
 * mappings are created with irq_create_direct_mapping().
 */
struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
                                         unsigned int max_irq,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain = irq_domain_alloc(of_node,
                                        IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
        if (domain) {
                domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
                irq_domain_add(domain);
        }
        return domain;
}

/**
 * irq_domain_add_tree() - Allocate and register a radix-tree revmap irq_domain.
 * @of_node: pointer to interrupt controller's device tree node.
 * @ops: map/unmap domain callbacks
 * @host_data: Controller private data pointer
 *
 * Note: The radix tree will be allocated later during boot automatically
 * (the reverse mapping will use the slow path until that happens).
 */
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
                                         const struct irq_domain_ops *ops,
                                         void *host_data)
{
        struct irq_domain *domain = irq_domain_alloc(of_node,
                                        IRQ_DOMAIN_MAP_TREE, ops, host_data);
        if (domain) {
                INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
                irq_domain_add(domain);
        }
        return domain;
}

/**
 * irq_find_host() - Locates a domain for a given device node
 * @node: device-tree node of the interrupt controller
 */
struct irq_domain *irq_find_host(struct device_node *node)
{
        struct irq_domain *h, *found = NULL;
        int rc;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node. This isn't a problem so far
         * though...
         */
        mutex_lock(&irq_domain_mutex);
        list_for_each_entry(h, &irq_domain_list, link) {
                if (h->ops->match)
                        rc = h->ops->match(h, node);
                else
                        rc = (h->of_node != NULL) && (h->of_node == node);

                if (rc) {
                        found = h;
                        break;
                }
        }
        mutex_unlock(&irq_domain_mutex);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

/**
 * irq_set_default_host() - Set a "default" irq domain
 * @domain: default domain pointer
 *
 * For convenience, it's possible to set a "default" domain that will be used
 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
 * platforms that want to manipulate a few hard coded interrupt numbers that
 * aren't properly represented in the device-tree.
 */
void irq_set_default_host(struct irq_domain *domain)
{
        pr_debug("irq: Default domain set to @0x%p\n", domain);

        irq_default_domain = domain;
}
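
/*
 * Usage sketch (hypothetical platform code, illustrative only; np and
 * foo_root_intc_ops are placeholders): a platform with a single root
 * interrupt controller can nominate that controller's domain as the default
 * from its early irq setup, so that irq_create_mapping(NULL, hwirq) and
 * friends resolve against it:
 *
 *      domain = irq_domain_add_linear(np, 64, &foo_root_intc_ops, NULL);
 *      irq_set_default_host(domain);
 */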

static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        irq_data->hwirq = hwirq;
        irq_data->domain = domain;
        if (domain->ops->map(domain, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                irq_data->domain = NULL;
                irq_data->hwirq = 0;
                return -1;
        }

        irq_clear_status_flags(virq, IRQ_NOREQUEST);

        return 0;
}

/**
 * irq_create_direct_mapping() - Allocate an irq for direct mapping
 * @domain: domain to allocate the irq for or NULL for default domain
 *
 * This routine is used for irq controllers which can choose the hardware
 * interrupt numbers they generate. In such a case it's simplest to use
 * the linux irq as the hardware interrupt number.
 */
unsigned int irq_create_direct_mapping(struct irq_domain *domain)
{
        unsigned int virq;

        if (domain == NULL)
                domain = irq_default_domain;

        BUG_ON(domain == NULL);
        WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);

        virq = irq_alloc_desc_from(1, 0);
        if (!virq) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return 0;
        }
        if (virq >= domain->revmap_data.nomap.max_irq) {
                pr_err("ERROR: no free irqs available below %i maximum\n",
                        domain->revmap_data.nomap.max_irq);
                irq_free_desc(virq);
                return 0;
        }
        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(domain, virq, virq)) {
                irq_free_desc(virq);
                return 0;
        }

        return virq;
}
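
/*
 * Usage sketch (hypothetical, illustrative only; foo_msi_ops and
 * foo_program_hwirq are placeholders): a controller that can be told which
 * hardware interrupt number to generate pairs a nomap domain with direct
 * mappings, so hwirq == virq:
 *
 *      domain = irq_domain_add_nomap(np, 0, &foo_msi_ops, NULL);
 *
 *      virq = irq_create_direct_mapping(domain);
 *      if (virq)
 *              foo_program_hwirq(virq);
 */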

/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, irq_set_irq_type() should be
 * called on the number returned from this call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
                                irq_hw_number_t hwirq)
{
        unsigned int hint;
        int virq;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL domain, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return 0;
        }
        pr_debug("irq: -> using domain @%p\n", domain);

        /* Check if mapping already exists */
        virq = irq_find_mapping(domain, hwirq);
        if (virq) {
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                return irq_domain_legacy_revmap(domain, hwirq);

        /* Allocate a virtual interrupt number */
        hint = hwirq % nr_irqs;
        if (hint == 0)
                hint++;
        virq = irq_alloc_desc_from(hint, 0);
        if (virq <= 0)
                virq = irq_alloc_desc_from(1, 0);
        if (virq <= 0) {
                pr_debug("irq: -> virq allocation failed\n");
                return 0;
        }

        if (irq_setup_virq(domain, virq, hwirq)) {
                if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
                        irq_free_desc(virq);
                return 0;
        }

        pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
                hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
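
/*
 * Usage sketch (illustrative only; foo_domain, foo_handler, foo and the
 * hwirq value 7 are placeholders): once a domain is registered, a driver
 * that knows its hardware interrupt number can obtain the Linux irq and
 * request it as usual:
 *
 *      virq = irq_create_mapping(foo_domain, 7);
 *      if (!virq)
 *              return -EINVAL;
 *      ret = request_irq(virq, foo_handler, 0, "foo", foo);
 *
 * Callers translating device-tree interrupt specifiers should normally go
 * through irq_create_of_mapping() below, which also applies the trigger type.
 */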

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_domain *domain;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        domain = controller ? irq_find_host(controller) : irq_default_domain;
        if (!domain) {
#ifdef CONFIG_MIPS
                /*
                 * Workaround to avoid breaking interrupt controller drivers
                 * that don't yet register an irq_domain.  This is temporary
                 * code. ~~~gcl, Feb 24, 2012
                 *
                 * Scheduled for removal in Linux v3.6.  That should be enough
                 * time.
                 */
                if (intsize > 0)
                        return intspec[0];
#endif
                printk(KERN_WARNING "irq: no irq domain found for %s !\n",
                       controller ? controller->full_name : "<none>");
                return 0;
        }

        /* If domain has no translation, then we assume interrupt line */
        if (domain->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (domain->ops->xlate(domain, controller, intspec, intsize,
                                     &hwirq, &type))
                        return 0;
        }

        /* Create mapping */
        virq = irq_create_mapping(domain, hwirq);
        if (!virq)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
                irq_set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        struct irq_domain *domain;
        irq_hw_number_t hwirq;

        if (!virq || !irq_data)
                return;

        domain = irq_data->domain;
        if (WARN_ON(domain == NULL))
                return;

        /* Never unmap legacy interrupts */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                return;

        irq_set_status_flags(virq, IRQ_NOREQUEST);

        /* remove chip and handler */
        irq_set_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (domain->ops->unmap)
                domain->ops->unmap(domain, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_data->hwirq;
        switch (domain->revmap_type) {
        case IRQ_DOMAIN_MAP_LINEAR:
                if (hwirq < domain->revmap_data.linear.size)
                        domain->revmap_data.linear.revmap[hwirq] = 0;
                break;
        case IRQ_DOMAIN_MAP_TREE:
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&domain->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

/**
 * irq_find_mapping() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a slow path, for use by generic code. It's expected that an
 * irq controller implementation directly calls the appropriate low level
 * mapping function.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % nr_irqs;

        /* Look for default domain if necessary */
        if (domain == NULL)
                domain = irq_default_domain;
        if (domain == NULL)
                return 0;

        /* legacy -> bail early */
        if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
                return irq_domain_legacy_revmap(domain, hwirq);

        /* Slow path does a linear search of the map */
        if (hint == 0)
                hint = 1;
        i = hint;
        do {
                struct irq_data *data = irq_get_irq_data(i);
                if (data && (data->domain == domain) && (data->hwirq == hwirq))
                        return i;
                i++;
                if (i >= nr_irqs)
                        i = 1;
        } while (i != hint);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

/**
 * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses radix tree
 * revmaps.
 */
unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
                                     irq_hw_number_t hwirq)
{
        struct irq_data *irq_data;

        if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
                return irq_find_mapping(domain, hwirq);

        /*
         * Freeing an irq can delete radix tree nodes along the lookup path;
         * those nodes are only freed via call_rcu(), so an RCU read-side
         * critical section is enough to make the lookup safe.
         */
        rcu_read_lock();
        irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
        rcu_read_unlock();

        /*
         * If found in radix tree, then fine.
         * Else fallback to linear lookup - this should not happen in practice
         * as it means that we failed to insert the node in the radix tree.
         */
        return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
}

/**
 * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
 * @domain: domain owning this hardware interrupt
 * @virq: linux irq number
 * @hwirq: hardware irq number in that domain space
 *
 * This is for use by irq controllers that use a radix tree reverse
 * mapping for fast lookup.
 */
void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
                return;

        if (virq) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
                mutex_unlock(&revmap_trees_mutex);
        }
}
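
/*
 * Usage sketch (hypothetical controller, illustrative only; foo_map,
 * foo_chip, foo_domain and foo_read_pending_source are placeholders): a
 * tree-revmap domain might be populated from the driver's ->map() callback
 * (or its startup path) and then queried from the interrupt cascade path:
 *
 *      static int foo_map(struct irq_domain *d, unsigned int virq,
 *                         irq_hw_number_t hwirq)
 *      {
 *              irq_set_chip_and_handler(virq, &foo_chip, handle_fasteoi_irq);
 *              irq_radix_revmap_insert(d, virq, hwirq);
 *              return 0;
 *      }
 *
 *      hwirq = foo_read_pending_source();
 *      virq = irq_radix_revmap_lookup(foo_domain, hwirq);
 *      if (virq)
 *              generic_handle_irq(virq);
 */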

/**
 * irq_linear_revmap() - Find a linux irq from a hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * This is a fast path, for use by irq controller code that uses linear
 * revmaps. It falls back to the slow path if the revmap entry doesn't exist
 * yet and will create the revmap entry with appropriate locking.
 */
unsigned int irq_linear_revmap(struct irq_domain *domain,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
                return irq_find_mapping(domain, hwirq);

        /* Check revmap bounds */
        if (unlikely(hwirq >= domain->revmap_data.linear.size))
                return irq_find_mapping(domain, hwirq);

        /* Check if revmap was allocated */
        revmap = domain->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(domain, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(!revmap[hwirq]))
                revmap[hwirq] = irq_find_mapping(domain, hwirq);

        return revmap[hwirq];
}
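
/*
 * Usage sketch (hypothetical cascaded controller, illustrative only;
 * foo_intc_cascade, foo_intc_get_pending and foo_domain are placeholders):
 * on the hot path a driver reads the pending hwirq from hardware and
 * converts it to a Linux irq with the O(1) linear revmap before handling it:
 *
 *      static void foo_intc_cascade(unsigned int irq, struct irq_desc *desc)
 *      {
 *              irq_hw_number_t hwirq = foo_intc_get_pending();
 *              unsigned int virq = irq_linear_revmap(foo_domain, hwirq);
 *
 *              if (virq)
 *                      generic_handle_irq(virq);
 *      }
 */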

#ifdef CONFIG_IRQ_DOMAIN_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        static const char none[] = "none";
        void *data;
        int i;

        seq_printf(m, "%-5s  %-7s  %-15s  %-*s  %s\n", "irq", "hwirq",
                      "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
                      "domain name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        struct irq_chip *chip;

                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", desc->irq_data.hwirq);

                        chip = irq_desc_get_chip(desc);
                        if (chip && chip->name)
                                p = chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        data = irq_desc_get_chip_data(desc);
                        seq_printf(m, data ? "0x%p  " : "  %p  ", data);

                        if (desc->irq_data.domain && desc->irq_data.domain->of_node)
                                p = desc->irq_data.domain->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
                                 NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

/*
 * Stub ->map() callback used by irq_domain_simple_ops below; it accepts
 * every mapping without doing any per-irq setup.
 */
int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
                          irq_hw_number_t hwirq)
{
        return 0;
}

/**
 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
 *
 * Device Tree IRQ specifier translation function which works with one cell
 * bindings where the cell value maps directly to the hwirq number.
 */
int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
                             const u32 *intspec, unsigned int intsize,
                             unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = IRQ_TYPE_NONE;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);

/**
 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with two cell
 * bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 */
int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
                        const u32 *intspec, unsigned int intsize,
                        irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 2))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
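
/*
 * Usage sketch (hypothetical driver, illustrative only; foo_intc_ops and
 * foo_intc_map are placeholders): a controller whose binding uses
 * #interrupt-cells = <2> (first cell: hwirq, second cell: trigger flags)
 * simply plugs the generic helper into its domain ops:
 *
 *      static const struct irq_domain_ops foo_intc_ops = {
 *              .map = foo_intc_map,
 *              .xlate = irq_domain_xlate_twocell,
 *      };
 *
 * A device-tree specifier such as interrupts = <5 4> (4 == IRQ_TYPE_LEVEL_HIGH)
 * then yields hwirq 5 with the level-high trigger type applied by
 * irq_create_of_mapping().
 */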

/**
 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
 *
 * Device Tree IRQ specifier translation function which works with either one
 * or two cell bindings where the cell values map directly to the hwirq number
 * and linux irq flags.
 *
 * Note: don't use this function unless your interrupt controller explicitly
 * supports both one and two cell bindings.  For the majority of controllers
 * the _onecell() or _twocell() variants above should be used.
 */
int irq_domain_xlate_onetwocell(struct irq_domain *d,
                                struct device_node *ctrlr,
                                const u32 *intspec, unsigned int intsize,
                                unsigned long *out_hwirq, unsigned int *out_type)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
        return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
        .map = irq_domain_simple_map,
        .xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);

#ifdef CONFIG_OF_IRQ
void irq_domain_generate_simple(const struct of_device_id *match,
                                u64 phys_base, unsigned int irq_start)
{
        struct device_node *node;
        pr_debug("looking for phys_base=%llx, irq_start=%i\n",
                (unsigned long long) phys_base, (int) irq_start);
        node = of_find_matching_node_by_address(NULL, match, phys_base);
        if (node)
                irq_domain_add_legacy(node, 32, irq_start, 0,
                                      &irq_domain_simple_ops, NULL);
}
EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
#endif
