linux/kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
        zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
        cpulist_parse(str, irq_default_affinity);
        /*
         * Set at least the boot cpu. We don't want to end up with
         * bug reports caused by random command line masks
         */
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
        return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
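
/*
 * Illustrative note (not in the original file): booting with a parameter
 * such as "irqaffinity=0-3" restricts irq_default_affinity, and thereby
 * the default affinity of newly set up interrupts, to CPUs 0-3. The CPU
 * parsing the command line (the boot CPU) is always added back, so a
 * stray mask cannot leave the default affinity empty.
 */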

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
        if (!irq_default_affinity)
                zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
        if (cpumask_empty(irq_default_affinity))
                cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
        if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                     gfp, node))
                return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
        if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->irq_common_data.affinity);
                return -ENOMEM;
        }
#endif
        return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
                          const struct cpumask *affinity)
{
        if (!affinity)
                affinity = irq_default_affinity;
        cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
        desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
                              const struct cpumask *affinity, struct module *owner)
{
        int cpu;

        desc->irq_common_data.handler_data = NULL;
        desc->irq_common_data.msi_desc = NULL;

        desc->irq_data.common = &desc->irq_common_data;
        desc->irq_data.irq = irq;
        desc->irq_data.chip = &no_irq_chip;
        desc->irq_data.chip_data = NULL;
        irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
        irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
        desc->handle_irq = handle_bad_irq;
        desc->depth = 1;
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;
        desc->name = NULL;
        desc->owner = owner;
        for_each_possible_cpu(cpu)
                *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
        desc_smp_init(desc, node, affinity);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        int cpu, irq = desc->irq_data.irq;
        ssize_t ret = 0;
        char *p = "";

        for_each_possible_cpu(cpu) {
                unsigned int c = kstat_irqs_cpu(irq, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
        }

        ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
        return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.chip && desc->irq_data.chip->name) {
                ret = scnprintf(buf, PAGE_SIZE, "%s\n",
                                desc->irq_data.chip->name);
        }
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->irq_data.domain)
                ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        ret = sprintf(buf, "%s\n",
                      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
                         struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        ssize_t ret = 0;

        raw_spin_lock_irq(&desc->lock);
        if (desc->name)
                ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
        raw_spin_unlock_irq(&desc->lock);

        return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
                            struct kobj_attribute *attr, char *buf)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
        struct irqaction *action;
        ssize_t ret = 0;
        char *p = "";

        raw_spin_lock_irq(&desc->lock);
        for (action = desc->action; action != NULL; action = action->next) {
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
                                 p, action->name);
                p = ",";
        }
        raw_spin_unlock_irq(&desc->lock);

        if (ret)
                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

        return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
        &per_cpu_count_attr.attr,
        &chip_name_attr.attr,
        &hwirq_attr.attr,
        &type_attr.attr,
        &name_attr.attr,
        &actions_attr.attr,
        NULL
};

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
        .sysfs_ops      = &kobj_sysfs_ops,
        .default_attrs  = irq_attrs,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
        if (irq_kobj_base) {
                /*
                 * Continue even in case of failure as this is nothing
                 * crucial.
                 */
                if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
                        pr_warn("Failed to add kobject for irq %d\n", irq);
        }
}

static int __init irq_sysfs_init(void)
{
        struct irq_desc *desc;
        int irq;

        /* Prevent concurrent irq alloc/free */
        irq_lock_sparse();

        irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
        if (!irq_kobj_base) {
                irq_unlock_sparse();
                return -ENOMEM;
        }

        /* Add the already allocated interrupts */
        for_each_irq_desc(irq, desc)
                irq_sysfs_add(irq, desc);
        irq_unlock_sparse();

        return 0;
}
postcore_initcall(irq_sysfs_init);
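
/*
 * Illustrative note (not in the original file): with the attributes above,
 * every allocated interrupt appears as a directory /sys/kernel/irq/<irq>/
 * containing the read-only files per_cpu_count, chip_name, hwirq, type,
 * name and actions.
 */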

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
        .release        = irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
        radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
        radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
        free_cpumask_var(desc->pending_mask);
#endif
        free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
        mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
        mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
                                   const struct cpumask *affinity,
                                   struct module *owner)
{
        struct irq_desc *desc;
        gfp_t gfp = GFP_KERNEL;

        desc = kzalloc_node(sizeof(*desc), gfp, node);
        if (!desc)
                return NULL;
        /* allocate based on nr_cpu_ids */
        desc->kstat_irqs = alloc_percpu(unsigned int);
        if (!desc->kstat_irqs)
                goto err_desc;

        if (alloc_masks(desc, gfp, node))
                goto err_kstat;

        raw_spin_lock_init(&desc->lock);
        lockdep_set_class(&desc->lock, &irq_desc_lock_class);
        init_rcu_head(&desc->rcu);

        desc_set_defaults(irq, desc, node, affinity, owner);
        irqd_set(&desc->irq_data, flags);
        kobject_init(&desc->kobj, &irq_kobj_type);

        return desc;

err_kstat:
        free_percpu(desc->kstat_irqs);
err_desc:
        kfree(desc);
        return NULL;
}

static void irq_kobj_release(struct kobject *kobj)
{
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

        free_masks(desc);
        free_percpu(desc->kstat_irqs);
        kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
        struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

        kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        unregister_irq_proc(irq, desc);

        /*
         * sparse_irq_lock also protects show_interrupts() and
         * kstat_irqs_usr(). Once we have deleted the descriptor from
         * the sparse tree we can free it. Lookups from proc will then
         * fail to find the descriptor.
         *
         * The sysfs entry must be serialized against a concurrent
         * irq_sysfs_init() as well.
         */
        mutex_lock(&sparse_irq_lock);
        kobject_del(&desc->kobj);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);

        /*
         * We free the descriptor, masks and stat fields via RCU. That
         * allows demultiplexing interrupts to do RCU based management
         * of the child interrupts.
         */
        call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
                       const struct cpumask *affinity, struct module *owner)
{
        const struct cpumask *mask = NULL;
        struct irq_desc *desc;
        unsigned int flags;
        int i;

        /* Validate affinity mask(s) */
        if (affinity) {
                for (i = 0, mask = affinity; i < cnt; i++, mask++) {
                        if (cpumask_empty(mask))
                                return -EINVAL;
                }
        }

        flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
        mask = NULL;

        for (i = 0; i < cnt; i++) {
                if (affinity) {
                        node = cpu_to_node(cpumask_first(affinity));
                        mask = affinity;
                        affinity++;
                }
                desc = alloc_desc(start + i, node, flags, mask, owner);
                if (!desc)
                        goto err;
                mutex_lock(&sparse_irq_lock);
                irq_insert_desc(start + i, desc);
                irq_sysfs_add(start + i, desc);
                mutex_unlock(&sparse_irq_lock);
        }
        return start;

err:
        for (i--; i >= 0; i--)
                free_desc(start + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        if (nr > IRQ_BITMAP_BITS)
                return -ENOMEM;
        nr_irqs = nr;
        return 0;
}

int __init early_irq_init(void)
{
        int i, initcnt, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        /* Let arch update nr_irqs and return the nr of preallocated irqs */
        initcnt = arch_probe_nr_irqs();
        printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

        if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
                nr_irqs = IRQ_BITMAP_BITS;

        if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
                initcnt = IRQ_BITMAP_BITS;

        if (initcnt > nr_irqs)
                nr_irqs = initcnt;

        for (i = 0; i < initcnt; i++) {
                desc = alloc_desc(i, node, 0, NULL, NULL);
                set_bit(i, allocated_irqs);
                irq_insert_desc(i, desc);
        }
        return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
        [0 ... NR_IRQS-1] = {
                .handle_irq     = handle_bad_irq,
                .depth          = 1,
                .lock           = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
        }
};

int __init early_irq_init(void)
{
        int count, i, node = first_online_node;
        struct irq_desc *desc;

        init_irq_default_affinity();

        printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

        desc = irq_desc;
        count = ARRAY_SIZE(irq_desc);

        for (i = 0; i < count; i++) {
                desc[i].kstat_irqs = alloc_percpu(unsigned int);
                alloc_masks(&desc[i], GFP_KERNEL, node);
                raw_spin_lock_init(&desc[i].lock);
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
        }
        return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
        return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
                              const struct cpumask *affinity,
                              struct module *owner)
{
        u32 i;

        for (i = 0; i < cnt; i++) {
                struct irq_desc *desc = irq_to_desc(start + i);

                desc->owner = owner;
        }
        return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
        return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
        mutex_lock(&sparse_irq_lock);
        bitmap_set(allocated_irqs, irq, 1);
        mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
        free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:        The irq number to handle
 *
 */
int generic_handle_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;
        generic_handle_irq_desc(desc);
        return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
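
/*
 * Usage sketch (illustrative, not part of the original file): a chained
 * flow handler for a demultiplexing interrupt controller typically maps
 * each pending child hwirq to its Linux irq number and forwards it via
 * generic_handle_irq(). The my_chip type, my_chip_read_pending() helper
 * and MY_CHIP_NR_IRQS are hypothetical; irq_find_mapping() and
 * for_each_set_bit() are real kernel APIs.
 */
#if 0
static void my_chip_demux_handler(struct irq_desc *desc)
{
        struct my_chip *chip = irq_desc_get_handler_data(desc);
        unsigned long pending = my_chip_read_pending(chip);
        int bit;

        /* Forward every pending child interrupt to its Linux handler */
        for_each_set_bit(bit, &pending, MY_CHIP_NR_IRQS)
                generic_handle_irq(irq_find_mapping(chip->domain, bit));
}
#endif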

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:     The domain where to perform the lookup
 * @hwirq:      The HW irq number to convert to a logical one
 * @lookup:     Whether to perform the domain lookup or not
 * @regs:       Register file coming from the low-level handling code
 *
 * Returns:     0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
                        bool lookup, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq = hwirq;
        int ret = 0;

        irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
        if (lookup)
                irq = irq_find_mapping(domain, hwirq);
#endif

        /*
         * Some hardware gives randomly wrong interrupts.  Rather
         * than crashing, do something sensible.
         */
        if (unlikely(!irq || irq >= nr_irqs)) {
                ack_bad_irq(irq);
                ret = -EINVAL;
        } else {
                generic_handle_irq(irq);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return ret;
}
#endif
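
/*
 * Usage sketch (illustrative, not part of the original file): on
 * architectures with CONFIG_HANDLE_DOMAIN_IRQ, the low-level entry code
 * reads the pending hwirq from the root interrupt controller and hands it
 * to the handle_domain_irq() wrapper, which calls __handle_domain_irq()
 * with lookup=true. my_root_domain and my_intc_read_hwirq() are
 * hypothetical names.
 */
#if 0
void my_arch_handle_irq(struct pt_regs *regs)
{
        u32 hwirq = my_intc_read_hwirq();

        handle_domain_irq(my_root_domain, hwirq, regs);
}
#endif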

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:       Start of descriptor range
 * @cnt:        Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
        int i;

        if (from >= nr_irqs || (from + cnt) > nr_irqs)
                return;

        for (i = 0; i < cnt; i++)
                free_desc(from + i);

        mutex_lock(&sparse_irq_lock);
        bitmap_clear(allocated_irqs, from, cnt);
        mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:        Allocate for specific irq number if irq >= 0
 * @from:       Start the search from this irq number
 * @cnt:        Number of consecutive irqs to allocate.
 * @node:       Preferred node on which the irq descriptor should be allocated
 * @owner:      Owning module (can be NULL)
 * @affinity:   Optional pointer to an affinity mask array of size @cnt which
 *              hints where the irq descriptors should be allocated and which
 *              default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                  struct module *owner, const struct cpumask *affinity)
{
        int start, ret;

        if (!cnt)
                return -EINVAL;

        if (irq >= 0) {
                if (from > irq)
                        return -EINVAL;
                from = irq;
        } else {
                /*
                 * For interrupts which are freely allocated the
                 * architecture can force a lower bound to the @from
                 * argument. x86 uses this to exclude the GSI space.
                 */
                from = arch_dynirq_lower_bound(from);
        }

        mutex_lock(&sparse_irq_lock);

        start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
                                           from, cnt, 0);
        ret = -EEXIST;
        if (irq >= 0 && start != irq)
                goto err;

        if (start + cnt > nr_irqs) {
                ret = irq_expand_nr_irqs(start + cnt);
                if (ret)
                        goto err;
        }

        bitmap_set(allocated_irqs, start, cnt);
        mutex_unlock(&sparse_irq_lock);
        return alloc_descs(start, cnt, node, affinity, owner);

err:
        mutex_unlock(&sparse_irq_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
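
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally use the wrapper macros from <linux/irq.h>, e.g.
 * irq_alloc_descs(), which passes THIS_MODULE and a NULL affinity to
 * __irq_alloc_descs(), and release the range again with irq_free_descs().
 * my_driver_setup_irqs() is a hypothetical caller.
 */
#if 0
static int my_driver_setup_irqs(void)
{
        /* Allocate four consecutive descriptors, searching upwards from 0 */
        int irq = irq_alloc_descs(-1, 0, 4, numa_node_id());

        if (irq < 0)
                return irq;

        /* ... bind irq .. irq + 3 to hardware, request handlers ... */

        irq_free_descs(irq, 4);
        return 0;
}
#endif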

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:        number of interrupts to allocate
 * @node:       node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
        int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

        if (irq < 0)
                return 0;

        for (i = irq; cnt > 0; i++, cnt--) {
                if (arch_setup_hwirq(i, node))
                        goto err;
                irq_clear_status_flags(i, _IRQ_NOREQUEST);
        }
        return irq;

err:
        for (i--; i >= irq; i--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(irq, cnt);
        return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:       Free from irq number
 * @cnt:        number of interrupts to free
 *
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
        int i, j;

        for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
        irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
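
/*
 * Usage sketch (illustrative, not part of the original file): the legacy
 * hwirq interface above is only built with
 * CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ and returns 0 (not a negative
 * errno) on failure. my_legacy_setup() is a hypothetical caller.
 */
#if 0
static int my_legacy_setup(void)
{
        unsigned int irq = irq_alloc_hwirqs(1, numa_node_id());

        if (!irq)
                return -ENOSPC;

        /* ... use the interrupt ... */

        irq_free_hwirqs(irq, 1);
        return 0;
}
#endif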

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:     where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
        return find_next_bit(allocated_irqs, nr_irqs, offset);
}
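
/*
 * Usage sketch (illustrative, not part of the original file): iterating
 * over all currently allocated irq numbers; the loop terminates once
 * irq_get_next_irq() returns nr_irqs. my_walk_allocated_irqs() is
 * hypothetical.
 */
#if 0
static void my_walk_allocated_irqs(void)
{
        unsigned int irq;

        for (irq = irq_get_next_irq(0); irq < nr_irqs;
             irq = irq_get_next_irq(irq + 1)) {
                /* ... inspect irq_to_desc(irq) ... */
        }
}
#endif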

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
                    unsigned int check)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (desc) {
                if (check & _IRQ_DESC_CHECK) {
                        if ((check & _IRQ_DESC_PERCPU) &&
                            !irq_settings_is_per_cpu_devid(desc))
                                return NULL;

                        if (!(check & _IRQ_DESC_PERCPU) &&
                            irq_settings_is_per_cpu_devid(desc))
                                return NULL;
                }

                if (bus)
                        chip_bus_lock(desc);
                raw_spin_lock_irqsave(&desc->lock, *flags);
        }
        return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        if (bus)
                chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid_partition(unsigned int irq,
                                   const struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return -EINVAL;

        if (desc->percpu_enabled)
                return -EINVAL;

        desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

        if (!desc->percpu_enabled)
                return -ENOMEM;

        if (affinity)
                desc->percpu_affinity = affinity;
        else
                desc->percpu_affinity = cpu_possible_mask;

        irq_set_percpu_devid_flags(irq);
        return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
        return irq_set_percpu_devid_partition(irq, NULL);
}
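
/*
 * Usage sketch (illustrative, not part of the original file): per-CPU
 * interrupts (e.g. arch timer PPIs) are marked with irq_set_percpu_devid()
 * and then requested with request_percpu_irq(), which takes a per-CPU
 * pointer as the dev_id. my_percpu_handler and my_percpu_dev are
 * hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct my_dev, my_percpu_dev);

static int my_percpu_irq_setup(unsigned int irq)
{
        irq_set_percpu_devid(irq);
        return request_percpu_irq(irq, my_percpu_handler, "my-dev",
                                  &my_percpu_dev);
}
#endif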

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc || !desc->percpu_enabled)
                return -EINVAL;

        if (affinity)
                cpumask_copy(affinity, desc->percpu_affinity);

        return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
        kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:        The interrupt number
 * @cpu:        The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->kstat_irqs ?
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int cpu;
        unsigned int sum = 0;

        if (!desc || !desc->kstat_irqs)
                return 0;
        for_each_possible_cpu(cpu)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:        The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
        unsigned int sum;

        irq_lock_sparse();
        sum = kstat_irqs(irq);
        irq_unlock_sparse();
        return sum;
}
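
/*
 * Usage sketch (illustrative, not part of the original file): callers in
 * preemptible context, such as the /proc/interrupts output path, use
 * kstat_irqs_usr() so the descriptor cannot be freed while the per-CPU
 * counts are being summed. my_report_irq() is hypothetical.
 */
#if 0
static void my_report_irq(unsigned int irq)
{
        pr_info("irq %u fired %u times\n", irq, kstat_irqs_usr(irq));
}
#endif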