linux/kernel/irq/manage.c
   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
  10#define pr_fmt(fmt) "genirq: " fmt
  11
  12#include <linux/irq.h>
  13#include <linux/kthread.h>
  14#include <linux/module.h>
  15#include <linux/random.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/sched/rt.h>
  20#include <linux/sched/task.h>
  21#include <uapi/linux/sched/types.h>
  22#include <linux/task_work.h>
  23
  24#include "internals.h"
  25
  26#ifdef CONFIG_IRQ_FORCED_THREADING
  27__read_mostly bool force_irqthreads;
  28
  29static int __init setup_forced_irqthreads(char *arg)
  30{
  31        force_irqthreads = true;
  32        return 0;
  33}
  34early_param("threadirqs", setup_forced_irqthreads);
  35#endif
  36
  37static void __synchronize_hardirq(struct irq_desc *desc)
  38{
  39        bool inprogress;
  40
  41        do {
  42                unsigned long flags;
  43
  44                /*
  45                 * Wait until we're out of the critical section.  This might
  46                 * give the wrong answer due to the lack of memory barriers.
  47                 */
  48                while (irqd_irq_inprogress(&desc->irq_data))
  49                        cpu_relax();
  50
  51                /* Ok, that indicated we're done: double-check carefully. */
  52                raw_spin_lock_irqsave(&desc->lock, flags);
  53                inprogress = irqd_irq_inprogress(&desc->irq_data);
  54                raw_spin_unlock_irqrestore(&desc->lock, flags);
  55
  56                /* Oops, that failed? */
  57        } while (inprogress);
  58}
  59
  60/**
  61 *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  62 *      @irq: interrupt number to wait for
  63 *
  64 *      This function waits for any pending hard IRQ handlers for this
  65 *      interrupt to complete before returning. If you use this
  66 *      function while holding a resource the IRQ handler may need you
  67 *      will deadlock. It does not take associated threaded handlers
  68 *      into account.
  69 *
  70 *      Do not use this for shutdown scenarios where you must be sure
  71 *      that all parts (hardirq and threaded handler) have completed.
  72 *
  73 *      Returns: false if a threaded handler is active.
  74 *
  75 *      This function may be called - with care - from IRQ context.
  76 */
  77bool synchronize_hardirq(unsigned int irq)
  78{
  79        struct irq_desc *desc = irq_to_desc(irq);
  80
  81        if (desc) {
  82                __synchronize_hardirq(desc);
  83                return !atomic_read(&desc->threads_active);
  84        }
  85
  86        return true;
  87}
  88EXPORT_SYMBOL(synchronize_hardirq);
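/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): wait
 * for the hard IRQ handler only and use the return value to find out
 * whether a threaded handler is still running before touching state the
 * thread might still use:
 *
 *	if (!synchronize_hardirq(foo->irq))
 *		return -EBUSY;		// threaded handler still running
 */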
  89
  90/**
  91 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  92 *      @irq: interrupt number to wait for
  93 *
  94 *      This function waits for any pending IRQ handlers for this interrupt
  95 *      to complete before returning. If you use this function while
  96 *      holding a resource the IRQ handler may need you will deadlock.
  97 *
  98 *      This function may be called - with care - from IRQ context.
  99 */
 100void synchronize_irq(unsigned int irq)
 101{
 102        struct irq_desc *desc = irq_to_desc(irq);
 103
 104        if (desc) {
 105                __synchronize_hardirq(desc);
 106                /*
 107                 * We made sure that no hardirq handler is
 108                 * running. Now verify that no threaded handlers are
 109                 * active.
 110                 */
 111                wait_event(desc->wait_for_threads,
 112                           !atomic_read(&desc->threads_active));
 113        }
 114}
 115EXPORT_SYMBOL(synchronize_irq);
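/*
 * Usage sketch (editor's addition; the "foo_*" helpers are hypothetical):
 * typical teardown ordering - stop the device from raising the interrupt
 * first, then wait for both the hard and the threaded handler to finish:
 *
 *	foo_hw_mask_irq(foo);		// device no longer raises the IRQ
 *	synchronize_irq(foo->irq);	// hardirq and thread have completed
 *	foo_free_buffers(foo);		// now safe to release shared state
 */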
 116
 117#ifdef CONFIG_SMP
 118cpumask_var_t irq_default_affinity;
 119
 120static bool __irq_can_set_affinity(struct irq_desc *desc)
 121{
 122        if (!desc || !irqd_can_balance(&desc->irq_data) ||
 123            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 124                return false;
 125        return true;
 126}
 127
 128/**
 129 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 130 *      @irq:           Interrupt to check
 131 *
 132 */
 133int irq_can_set_affinity(unsigned int irq)
 134{
 135        return __irq_can_set_affinity(irq_to_desc(irq));
 136}
 137
 138/**
  139 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 140 * @irq:        Interrupt to check
 141 *
 142 * Like irq_can_set_affinity() above, but additionally checks for the
 143 * AFFINITY_MANAGED flag.
 144 */
 145bool irq_can_set_affinity_usr(unsigned int irq)
 146{
 147        struct irq_desc *desc = irq_to_desc(irq);
 148
 149        return __irq_can_set_affinity(desc) &&
 150                !irqd_affinity_is_managed(&desc->irq_data);
 151}
 152
 153/**
 154 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
  155 *      @desc:          irq descriptor which has affinity changed
 156 *
 157 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 158 *      to the interrupt thread itself. We can not call
 159 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 160 *      code can be called from hard interrupt context.
 161 */
 162void irq_set_thread_affinity(struct irq_desc *desc)
 163{
 164        struct irqaction *action;
 165
 166        for_each_action_of_desc(desc, action)
 167                if (action->thread)
 168                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 169}
 170
 171#ifdef CONFIG_GENERIC_PENDING_IRQ
 172static inline bool irq_can_move_pcntxt(struct irq_data *data)
 173{
 174        return irqd_can_move_in_process_context(data);
 175}
 176static inline bool irq_move_pending(struct irq_data *data)
 177{
 178        return irqd_is_setaffinity_pending(data);
 179}
 180static inline void
 181irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 182{
 183        cpumask_copy(desc->pending_mask, mask);
 184}
 185static inline void
 186irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 187{
 188        cpumask_copy(mask, desc->pending_mask);
 189}
 190#else
 191static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 192static inline bool irq_move_pending(struct irq_data *data) { return false; }
 193static inline void
 194irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 195static inline void
 196irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 197#endif
 198
 199int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 200                        bool force)
 201{
 202        struct irq_desc *desc = irq_data_to_desc(data);
 203        struct irq_chip *chip = irq_data_get_irq_chip(data);
 204        int ret;
 205
 206        ret = chip->irq_set_affinity(data, mask, force);
 207        switch (ret) {
 208        case IRQ_SET_MASK_OK:
 209        case IRQ_SET_MASK_OK_DONE:
 210                cpumask_copy(desc->irq_common_data.affinity, mask);
 211        case IRQ_SET_MASK_OK_NOCOPY:
 212                irq_set_thread_affinity(desc);
 213                ret = 0;
 214        }
 215
 216        return ret;
 217}
 218
 219int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 220                            bool force)
 221{
 222        struct irq_chip *chip = irq_data_get_irq_chip(data);
 223        struct irq_desc *desc = irq_data_to_desc(data);
 224        int ret = 0;
 225
 226        if (!chip || !chip->irq_set_affinity)
 227                return -EINVAL;
 228
 229        if (irq_can_move_pcntxt(data)) {
 230                ret = irq_do_set_affinity(data, mask, force);
 231        } else {
 232                irqd_set_move_pending(data);
 233                irq_copy_pending(desc, mask);
 234        }
 235
 236        if (desc->affinity_notify) {
 237                kref_get(&desc->affinity_notify->kref);
 238                schedule_work(&desc->affinity_notify->work);
 239        }
 240        irqd_set(data, IRQD_AFFINITY_SET);
 241
 242        return ret;
 243}
 244
 245int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 246{
 247        struct irq_desc *desc = irq_to_desc(irq);
 248        unsigned long flags;
 249        int ret;
 250
 251        if (!desc)
 252                return -EINVAL;
 253
 254        raw_spin_lock_irqsave(&desc->lock, flags);
 255        ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 256        raw_spin_unlock_irqrestore(&desc->lock, flags);
 257        return ret;
 258}
 259
 260int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 261{
 262        unsigned long flags;
 263        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 264
 265        if (!desc)
 266                return -EINVAL;
 267        desc->affinity_hint = m;
 268        irq_put_desc_unlock(desc, flags);
 269        /* set the initial affinity to prevent every interrupt being on CPU0 */
 270        if (m)
 271                __irq_set_affinity(irq, m, false);
 272        return 0;
 273}
 274EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
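/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): a
 * multi-queue driver typically spreads its vectors by giving each queue
 * interrupt a per-CPU hint, and clears the hint again before free_irq():
 *
 *	for (i = 0; i < foo->num_queues; i++)
 *		irq_set_affinity_hint(foo->queue_irq[i],
 *				      cpumask_of(i % num_online_cpus()));
 *	...
 *	irq_set_affinity_hint(foo->queue_irq[i], NULL);	// per queue, before free_irq()
 */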
 275
 276static void irq_affinity_notify(struct work_struct *work)
 277{
 278        struct irq_affinity_notify *notify =
 279                container_of(work, struct irq_affinity_notify, work);
 280        struct irq_desc *desc = irq_to_desc(notify->irq);
 281        cpumask_var_t cpumask;
 282        unsigned long flags;
 283
 284        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 285                goto out;
 286
 287        raw_spin_lock_irqsave(&desc->lock, flags);
 288        if (irq_move_pending(&desc->irq_data))
 289                irq_get_pending(cpumask, desc);
 290        else
 291                cpumask_copy(cpumask, desc->irq_common_data.affinity);
 292        raw_spin_unlock_irqrestore(&desc->lock, flags);
 293
 294        notify->notify(notify, cpumask);
 295
 296        free_cpumask_var(cpumask);
 297out:
 298        kref_put(&notify->kref, notify->release);
 299}
 300
 301/**
 302 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 303 *      @irq:           Interrupt for which to enable/disable notification
 304 *      @notify:        Context for notification, or %NULL to disable
 305 *                      notification.  Function pointers must be initialised;
 306 *                      the other fields will be initialised by this function.
 307 *
 308 *      Must be called in process context.  Notification may only be enabled
 309 *      after the IRQ is allocated and must be disabled before the IRQ is
 310 *      freed using free_irq().
 311 */
 312int
 313irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 314{
 315        struct irq_desc *desc = irq_to_desc(irq);
 316        struct irq_affinity_notify *old_notify;
 317        unsigned long flags;
 318
 319        /* The release function is promised process context */
 320        might_sleep();
 321
 322        if (!desc)
 323                return -EINVAL;
 324
 325        /* Complete initialisation of *notify */
 326        if (notify) {
 327                notify->irq = irq;
 328                kref_init(&notify->kref);
 329                INIT_WORK(&notify->work, irq_affinity_notify);
 330        }
 331
 332        raw_spin_lock_irqsave(&desc->lock, flags);
 333        old_notify = desc->affinity_notify;
 334        desc->affinity_notify = notify;
 335        raw_spin_unlock_irqrestore(&desc->lock, flags);
 336
 337        if (old_notify)
 338                kref_put(&old_notify->kref, old_notify->release);
 339
 340        return 0;
 341}
 342EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
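/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): the
 * caller embeds a struct irq_affinity_notify, fills in notify() and
 * release() and registers it; passing NULL unregisters it again and must
 * happen before free_irq():
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo *foo = container_of(notify, struct foo, notify);
 *
 *		foo_retarget_queue(foo, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing dynamically allocated in this sketch
 *	}
 *
 *	foo->notify.notify = foo_affinity_notify;
 *	foo->notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);
 */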
 343
 344#ifndef CONFIG_AUTO_IRQ_AFFINITY
 345/*
 346 * Generic version of the affinity autoselector.
 347 */
 348static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 349{
 350        struct cpumask *set = irq_default_affinity;
 351        int node = irq_desc_get_node(desc);
 352
 353        /* Excludes PER_CPU and NO_BALANCE interrupts */
 354        if (!__irq_can_set_affinity(desc))
 355                return 0;
 356
 357        /*
 358         * Preserve the managed affinity setting and a userspace affinity
 359         * setup, but make sure that one of the targets is online.
 360         */
 361        if (irqd_affinity_is_managed(&desc->irq_data) ||
 362            irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 363                if (cpumask_intersects(desc->irq_common_data.affinity,
 364                                       cpu_online_mask))
 365                        set = desc->irq_common_data.affinity;
 366                else
 367                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 368        }
 369
 370        cpumask_and(mask, cpu_online_mask, set);
 371        if (node != NUMA_NO_NODE) {
 372                const struct cpumask *nodemask = cpumask_of_node(node);
 373
 374                /* make sure at least one of the cpus in nodemask is online */
 375                if (cpumask_intersects(mask, nodemask))
 376                        cpumask_and(mask, mask, nodemask);
 377        }
 378        irq_do_set_affinity(&desc->irq_data, mask, false);
 379        return 0;
 380}
 381#else
 382/* Wrapper for ALPHA specific affinity selector magic */
 383static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
 384{
 385        return irq_select_affinity(irq_desc_get_irq(d));
 386}
 387#endif
 388
 389/*
 390 * Called when affinity is set via /proc/irq
 391 */
 392int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 393{
 394        struct irq_desc *desc = irq_to_desc(irq);
 395        unsigned long flags;
 396        int ret;
 397
 398        raw_spin_lock_irqsave(&desc->lock, flags);
 399        ret = setup_affinity(desc, mask);
 400        raw_spin_unlock_irqrestore(&desc->lock, flags);
 401        return ret;
 402}
 403
 404#else
 405static inline int
 406setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 407{
 408        return 0;
 409}
 410#endif
 411
 412/**
 413 *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 414 *      @irq: interrupt number to set affinity
 415 *      @vcpu_info: vCPU specific data
 416 *
 417 *      This function uses the vCPU specific data to set the vCPU
 418 *      affinity for an irq. The vCPU specific data is passed from
 419 *      outside, such as KVM. One example code path is as below:
 420 *      KVM -> IOMMU -> irq_set_vcpu_affinity().
 421 */
 422int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 423{
 424        unsigned long flags;
 425        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 426        struct irq_data *data;
 427        struct irq_chip *chip;
 428        int ret = -ENOSYS;
 429
 430        if (!desc)
 431                return -EINVAL;
 432
 433        data = irq_desc_get_irq_data(desc);
 434        chip = irq_data_get_irq_chip(data);
 435        if (chip && chip->irq_set_vcpu_affinity)
 436                ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 437        irq_put_desc_unlock(desc, flags);
 438
 439        return ret;
 440}
 441EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 442
 443void __disable_irq(struct irq_desc *desc)
 444{
 445        if (!desc->depth++)
 446                irq_disable(desc);
 447}
 448
 449static int __disable_irq_nosync(unsigned int irq)
 450{
 451        unsigned long flags;
 452        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 453
 454        if (!desc)
 455                return -EINVAL;
 456        __disable_irq(desc);
 457        irq_put_desc_busunlock(desc, flags);
 458        return 0;
 459}
 460
 461/**
 462 *      disable_irq_nosync - disable an irq without waiting
 463 *      @irq: Interrupt to disable
 464 *
 465 *      Disable the selected interrupt line.  Disables and Enables are
 466 *      nested.
 467 *      Unlike disable_irq(), this function does not ensure existing
 468 *      instances of the IRQ handler have completed before returning.
 469 *
 470 *      This function may be called from IRQ context.
 471 */
 472void disable_irq_nosync(unsigned int irq)
 473{
 474        __disable_irq_nosync(irq);
 475}
 476EXPORT_SYMBOL(disable_irq_nosync);
 477
 478/**
 479 *      disable_irq - disable an irq and wait for completion
 480 *      @irq: Interrupt to disable
 481 *
 482 *      Disable the selected interrupt line.  Enables and Disables are
 483 *      nested.
 484 *      This function waits for any pending IRQ handlers for this interrupt
 485 *      to complete before returning. If you use this function while
 486 *      holding a resource the IRQ handler may need you will deadlock.
 487 *
 488 *      This function may be called - with care - from IRQ context.
 489 */
 490void disable_irq(unsigned int irq)
 491{
 492        if (!__disable_irq_nosync(irq))
 493                synchronize_irq(irq);
 494}
 495EXPORT_SYMBOL(disable_irq);
 496
 497/**
 498 *      disable_hardirq - disables an irq and waits for hardirq completion
 499 *      @irq: Interrupt to disable
 500 *
 501 *      Disable the selected interrupt line.  Enables and Disables are
 502 *      nested.
 503 *      This function waits for any pending hard IRQ handlers for this
 504 *      interrupt to complete before returning. If you use this function while
 505 *      holding a resource the hard IRQ handler may need you will deadlock.
 506 *
 507 *      When used to optimistically disable an interrupt from atomic context
 508 *      the return value must be checked.
 509 *
 510 *      Returns: false if a threaded handler is active.
 511 *
 512 *      This function may be called - with care - from IRQ context.
 513 */
 514bool disable_hardirq(unsigned int irq)
 515{
 516        if (!__disable_irq_nosync(irq))
 517                return synchronize_hardirq(irq);
 518
 519        return false;
 520}
 521EXPORT_SYMBOL_GPL(disable_hardirq);
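/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical):
 * optimistic disable from atomic context - the return value must be
 * checked, since a threaded handler may still be running on return:
 *
 *	if (disable_hardirq(foo->irq))
 *		foo_poll_once(foo);	// hardirq quiesced, safe to poll
 *	enable_irq(foo->irq);
 */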
 522
 523void __enable_irq(struct irq_desc *desc)
 524{
 525        switch (desc->depth) {
 526        case 0:
 527 err_out:
 528                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 529                     irq_desc_get_irq(desc));
 530                break;
 531        case 1: {
 532                if (desc->istate & IRQS_SUSPENDED)
 533                        goto err_out;
 534                /* Prevent probing on this irq: */
 535                irq_settings_set_noprobe(desc);
 536                irq_enable(desc);
 537                check_irq_resend(desc);
 538                /* fall-through */
 539        }
 540        default:
 541                desc->depth--;
 542        }
 543}
 544
 545/**
 546 *      enable_irq - enable handling of an irq
 547 *      @irq: Interrupt to enable
 548 *
 549 *      Undoes the effect of one call to disable_irq().  If this
 550 *      matches the last disable, processing of interrupts on this
 551 *      IRQ line is re-enabled.
 552 *
 553 *      This function may be called from IRQ context only when
 554 *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 555 */
 556void enable_irq(unsigned int irq)
 557{
 558        unsigned long flags;
 559        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 560
 561        if (!desc)
 562                return;
 563        if (WARN(!desc->irq_data.chip,
 564                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 565                goto out;
 566
 567        __enable_irq(desc);
 568out:
 569        irq_put_desc_busunlock(desc, flags);
 570}
 571EXPORT_SYMBOL(enable_irq);
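/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical):
 * disables and enables nest, so every disable_irq() needs a matching
 * enable_irq() before the line is serviced again:
 *
 *	disable_irq(foo->irq);		// depth 0 -> 1, line masked
 *	disable_irq(foo->irq);		// depth 1 -> 2
 *	foo_reprogram_hw(foo);
 *	enable_irq(foo->irq);		// depth 2 -> 1, still masked
 *	enable_irq(foo->irq);		// depth 1 -> 0, handling re-enabled
 */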
 572
 573static int set_irq_wake_real(unsigned int irq, unsigned int on)
 574{
 575        struct irq_desc *desc = irq_to_desc(irq);
 576        int ret = -ENXIO;
 577
 578        if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
 579                return 0;
 580
 581        if (desc->irq_data.chip->irq_set_wake)
 582                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 583
 584        return ret;
 585}
 586
 587/**
 588 *      irq_set_irq_wake - control irq power management wakeup
 589 *      @irq:   interrupt to control
 590 *      @on:    enable/disable power management wakeup
 591 *
 592 *      Enable/disable power management wakeup mode, which is
 593 *      disabled by default.  Enables and disables must match,
 594 *      just as they match for non-wakeup mode support.
 595 *
 596 *      Wakeup mode lets this IRQ wake the system from sleep
 597 *      states like "suspend to RAM".
 598 */
 599int irq_set_irq_wake(unsigned int irq, unsigned int on)
 600{
 601        unsigned long flags;
 602        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 603        int ret = 0;
 604
 605        if (!desc)
 606                return -EINVAL;
 607
 608        /* wakeup-capable irqs can be shared between drivers that
 609         * don't need to have the same sleep mode behaviors.
 610         */
 611        if (on) {
 612                if (desc->wake_depth++ == 0) {
 613                        ret = set_irq_wake_real(irq, on);
 614                        if (ret)
 615                                desc->wake_depth = 0;
 616                        else
 617                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 618                }
 619        } else {
 620                if (desc->wake_depth == 0) {
 621                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 622                } else if (--desc->wake_depth == 0) {
 623                        ret = set_irq_wake_real(irq, on);
 624                        if (ret)
 625                                desc->wake_depth = 1;
 626                        else
 627                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 628                }
 629        }
 630        irq_put_desc_busunlock(desc, flags);
 631        return ret;
 632}
 633EXPORT_SYMBOL(irq_set_irq_wake);
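/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): a
 * driver usually flips wakeup in its suspend/resume callbacks, keeping
 * the enable and disable calls balanced:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 0);
 *		return 0;
 *	}
 */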
 634
 635/*
 636 * Internal function that tells the architecture code whether a
 637 * particular irq has been exclusively allocated or is available
 638 * for driver use.
 639 */
 640int can_request_irq(unsigned int irq, unsigned long irqflags)
 641{
 642        unsigned long flags;
 643        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 644        int canrequest = 0;
 645
 646        if (!desc)
 647                return 0;
 648
 649        if (irq_settings_can_request(desc)) {
 650                if (!desc->action ||
 651                    irqflags & desc->action->flags & IRQF_SHARED)
 652                        canrequest = 1;
 653        }
 654        irq_put_desc_unlock(desc, flags);
 655        return canrequest;
 656}
 657
 658int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 659{
 660        struct irq_chip *chip = desc->irq_data.chip;
 661        int ret, unmask = 0;
 662
 663        if (!chip || !chip->irq_set_type) {
 664                /*
 665                 * IRQF_TRIGGER_* but the PIC does not support multiple
 666                 * flow-types?
 667                 */
 668                pr_debug("No set_type function for IRQ %d (%s)\n",
 669                         irq_desc_get_irq(desc),
 670                         chip ? (chip->name ? : "unknown") : "unknown");
 671                return 0;
 672        }
 673
 674        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 675                if (!irqd_irq_masked(&desc->irq_data))
 676                        mask_irq(desc);
 677                if (!irqd_irq_disabled(&desc->irq_data))
 678                        unmask = 1;
 679        }
 680
 681        /* Mask all flags except trigger mode */
 682        flags &= IRQ_TYPE_SENSE_MASK;
 683        ret = chip->irq_set_type(&desc->irq_data, flags);
 684
 685        switch (ret) {
 686        case IRQ_SET_MASK_OK:
 687        case IRQ_SET_MASK_OK_DONE:
 688                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 689                irqd_set(&desc->irq_data, flags);
 690
 691        case IRQ_SET_MASK_OK_NOCOPY:
 692                flags = irqd_get_trigger_type(&desc->irq_data);
 693                irq_settings_set_trigger_mask(desc, flags);
 694                irqd_clear(&desc->irq_data, IRQD_LEVEL);
 695                irq_settings_clr_level(desc);
 696                if (flags & IRQ_TYPE_LEVEL_MASK) {
 697                        irq_settings_set_level(desc);
 698                        irqd_set(&desc->irq_data, IRQD_LEVEL);
 699                }
 700
 701                ret = 0;
 702                break;
 703        default:
 704                pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 705                       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 706        }
 707        if (unmask)
 708                unmask_irq(desc);
 709        return ret;
 710}
 711
 712#ifdef CONFIG_HARDIRQS_SW_RESEND
 713int irq_set_parent(int irq, int parent_irq)
 714{
 715        unsigned long flags;
 716        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 717
 718        if (!desc)
 719                return -EINVAL;
 720
 721        desc->parent_irq = parent_irq;
 722
 723        irq_put_desc_unlock(desc, flags);
 724        return 0;
 725}
 726EXPORT_SYMBOL_GPL(irq_set_parent);
 727#endif
 728
 729/*
  730 * Default primary interrupt handler for threaded interrupts. It is
  731 * assigned as the primary handler when request_threaded_irq is called
 732 * with handler == NULL. Useful for oneshot interrupts.
 733 */
 734static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 735{
 736        return IRQ_WAKE_THREAD;
 737}
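/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): the
 * default primary handler above is what a driver gets when it requests a
 * purely threaded, oneshot interrupt:
 *
 *	ret = request_threaded_irq(foo->irq, NULL, foo_thread_fn,
 *				   IRQF_ONESHOT, "foo", foo);
 */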
 738
 739/*
 740 * Primary handler for nested threaded interrupts. Should never be
 741 * called.
 742 */
 743static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 744{
 745        WARN(1, "Primary handler called for nested irq %d\n", irq);
 746        return IRQ_NONE;
 747}
 748
 749static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 750{
 751        WARN(1, "Secondary action handler called for irq %d\n", irq);
 752        return IRQ_NONE;
 753}
 754
 755static int irq_wait_for_interrupt(struct irqaction *action)
 756{
 757        set_current_state(TASK_INTERRUPTIBLE);
 758
 759        while (!kthread_should_stop()) {
 760
 761                if (test_and_clear_bit(IRQTF_RUNTHREAD,
 762                                       &action->thread_flags)) {
 763                        __set_current_state(TASK_RUNNING);
 764                        return 0;
 765                }
 766                schedule();
 767                set_current_state(TASK_INTERRUPTIBLE);
 768        }
 769        __set_current_state(TASK_RUNNING);
 770        return -1;
 771}
 772
 773/*
 774 * Oneshot interrupts keep the irq line masked until the threaded
  775 * handler has finished. Unmask if the interrupt has not been disabled and
 776 * is marked MASKED.
 777 */
 778static void irq_finalize_oneshot(struct irq_desc *desc,
 779                                 struct irqaction *action)
 780{
 781        if (!(desc->istate & IRQS_ONESHOT) ||
 782            action->handler == irq_forced_secondary_handler)
 783                return;
 784again:
 785        chip_bus_lock(desc);
 786        raw_spin_lock_irq(&desc->lock);
 787
 788        /*
  789         * Implausible though it may be, we need to protect ourselves
  790         * against the following scenario:
  791         *
  792         * The thread finishes before the hard interrupt handler on the
  793         * other CPU does. If we unmask the irq line now, the interrupt
  794         * can come in again, the flow handler masks the line and bails
  795         * out due to IRQS_INPROGRESS, and the line stays masked forever.
 796         *
 797         * This also serializes the state of shared oneshot handlers
  798         * versus "desc->threads_oneshot |= action->thread_mask;" in
 799         * irq_wake_thread(). See the comment there which explains the
 800         * serialization.
 801         */
 802        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 803                raw_spin_unlock_irq(&desc->lock);
 804                chip_bus_sync_unlock(desc);
 805                cpu_relax();
 806                goto again;
 807        }
 808
 809        /*
 810         * Now check again, whether the thread should run. Otherwise
 811         * we would clear the threads_oneshot bit of this thread which
 812         * was just set.
 813         */
 814        if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 815                goto out_unlock;
 816
 817        desc->threads_oneshot &= ~action->thread_mask;
 818
 819        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 820            irqd_irq_masked(&desc->irq_data))
 821                unmask_threaded_irq(desc);
 822
 823out_unlock:
 824        raw_spin_unlock_irq(&desc->lock);
 825        chip_bus_sync_unlock(desc);
 826}
 827
 828#ifdef CONFIG_SMP
 829/*
 830 * Check whether we need to change the affinity of the interrupt thread.
 831 */
 832static void
 833irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 834{
 835        cpumask_var_t mask;
 836        bool valid = true;
 837
 838        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 839                return;
 840
 841        /*
  842         * In case we are out of memory, set IRQTF_AFFINITY again and
  843         * retry next time.
 844         */
 845        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 846                set_bit(IRQTF_AFFINITY, &action->thread_flags);
 847                return;
 848        }
 849
 850        raw_spin_lock_irq(&desc->lock);
 851        /*
 852         * This code is triggered unconditionally. Check the affinity
 853         * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 854         */
 855        if (desc->irq_common_data.affinity)
 856                cpumask_copy(mask, desc->irq_common_data.affinity);
 857        else
 858                valid = false;
 859        raw_spin_unlock_irq(&desc->lock);
 860
 861        if (valid)
 862                set_cpus_allowed_ptr(current, mask);
 863        free_cpumask_var(mask);
 864}
 865#else
 866static inline void
 867irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 868#endif
 869
 870/*
  871 * Interrupts which are not explicitly requested as threaded
 872 * interrupts rely on the implicit bh/preempt disable of the hard irq
 873 * context. So we need to disable bh here to avoid deadlocks and other
 874 * side effects.
 875 */
 876static irqreturn_t
 877irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 878{
 879        irqreturn_t ret;
 880
 881        local_bh_disable();
 882        ret = action->thread_fn(action->irq, action->dev_id);
 883        irq_finalize_oneshot(desc, action);
 884        local_bh_enable();
 885        return ret;
 886}
 887
 888/*
 889 * Interrupts explicitly requested as threaded interrupts want to be
  890 * preemptible - many of them need to sleep and wait for slow buses to
 891 * complete.
 892 */
 893static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 894                struct irqaction *action)
 895{
 896        irqreturn_t ret;
 897
 898        ret = action->thread_fn(action->irq, action->dev_id);
 899        irq_finalize_oneshot(desc, action);
 900        return ret;
 901}
 902
 903static void wake_threads_waitq(struct irq_desc *desc)
 904{
 905        if (atomic_dec_and_test(&desc->threads_active))
 906                wake_up(&desc->wait_for_threads);
 907}
 908
 909static void irq_thread_dtor(struct callback_head *unused)
 910{
 911        struct task_struct *tsk = current;
 912        struct irq_desc *desc;
 913        struct irqaction *action;
 914
 915        if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
 916                return;
 917
 918        action = kthread_data(tsk);
 919
 920        pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 921               tsk->comm, tsk->pid, action->irq);
 922
 923
 924        desc = irq_to_desc(action->irq);
 925        /*
 926         * If IRQTF_RUNTHREAD is set, we need to decrement
 927         * desc->threads_active and wake possible waiters.
 928         */
 929        if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 930                wake_threads_waitq(desc);
 931
 932        /* Prevent a stale desc->threads_oneshot */
 933        irq_finalize_oneshot(desc, action);
 934}
 935
 936static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
 937{
 938        struct irqaction *secondary = action->secondary;
 939
 940        if (WARN_ON_ONCE(!secondary))
 941                return;
 942
 943        raw_spin_lock_irq(&desc->lock);
 944        __irq_wake_thread(desc, secondary);
 945        raw_spin_unlock_irq(&desc->lock);
 946}
 947
 948/*
 949 * Interrupt handler thread
 950 */
 951static int irq_thread(void *data)
 952{
 953        struct callback_head on_exit_work;
 954        struct irqaction *action = data;
 955        struct irq_desc *desc = irq_to_desc(action->irq);
 956        irqreturn_t (*handler_fn)(struct irq_desc *desc,
 957                        struct irqaction *action);
 958
 959        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 960                                        &action->thread_flags))
 961                handler_fn = irq_forced_thread_fn;
 962        else
 963                handler_fn = irq_thread_fn;
 964
 965        init_task_work(&on_exit_work, irq_thread_dtor);
 966        task_work_add(current, &on_exit_work, false);
 967
 968        irq_thread_check_affinity(desc, action);
 969
 970        while (!irq_wait_for_interrupt(action)) {
 971                irqreturn_t action_ret;
 972
 973                irq_thread_check_affinity(desc, action);
 974
 975                action_ret = handler_fn(desc, action);
 976                if (action_ret == IRQ_HANDLED)
 977                        atomic_inc(&desc->threads_handled);
 978                if (action_ret == IRQ_WAKE_THREAD)
 979                        irq_wake_secondary(desc, action);
 980
 981                wake_threads_waitq(desc);
 982        }
 983
 984        /*
 985         * This is the regular exit path. __free_irq() is stopping the
 986         * thread via kthread_stop() after calling
 987         * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
 988         * oneshot mask bit can be set. We cannot verify that as we
 989         * cannot touch the oneshot mask at this point anymore as
  990         * __setup_irq() might have given out current's thread_mask
 991         * again.
 992         */
 993        task_work_cancel(current, irq_thread_dtor);
 994        return 0;
 995}
 996
 997/**
 998 *      irq_wake_thread - wake the irq thread for the action identified by dev_id
 999 *      @irq:           Interrupt line
1000 *      @dev_id:        Device identity for which the thread should be woken
1001 *
1002 */
1003void irq_wake_thread(unsigned int irq, void *dev_id)
1004{
1005        struct irq_desc *desc = irq_to_desc(irq);
1006        struct irqaction *action;
1007        unsigned long flags;
1008
1009        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1010                return;
1011
1012        raw_spin_lock_irqsave(&desc->lock, flags);
1013        for_each_action_of_desc(desc, action) {
1014                if (action->dev_id == dev_id) {
1015                        if (action->thread)
1016                                __irq_wake_thread(desc, action);
1017                        break;
1018                }
1019        }
1020        raw_spin_unlock_irqrestore(&desc->lock, flags);
1021}
1022EXPORT_SYMBOL_GPL(irq_wake_thread);
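/*
 * Usage sketch (editor's addition; the "foo" names are hypothetical): a
 * driver can kick its own threaded handler without a hardware interrupt,
 * e.g. from a timeout path, by passing the dev_id it registered with:
 *
 *	irq_wake_thread(foo->irq, foo);
 */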
1023
1024static int irq_setup_forced_threading(struct irqaction *new)
1025{
1026        if (!force_irqthreads)
1027                return 0;
1028        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1029                return 0;
1030
1031        new->flags |= IRQF_ONESHOT;
1032
1033        /*
1034         * Handle the case where we have a real primary handler and a
 1035         * thread handler. We force-thread it as well by creating a
1036         * secondary action.
1037         */
1038        if (new->handler != irq_default_primary_handler && new->thread_fn) {
1039                /* Allocate the secondary action */
1040                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1041                if (!new->secondary)
1042                        return -ENOMEM;
1043                new->secondary->handler = irq_forced_secondary_handler;
1044                new->secondary->thread_fn = new->thread_fn;
1045                new->secondary->dev_id = new->dev_id;
1046                new->secondary->irq = new->irq;
1047                new->secondary->name = new->name;
1048        }
1049        /* Deal with the primary handler */
1050        set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1051        new->thread_fn = new->handler;
1052        new->handler = irq_default_primary_handler;
1053        return 0;
1054}
1055
1056static int irq_request_resources(struct irq_desc *desc)
1057{
1058        struct irq_data *d = &desc->irq_data;
1059        struct irq_chip *c = d->chip;
1060
1061        return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1062}
1063
1064static void irq_release_resources(struct irq_desc *desc)
1065{
1066        struct irq_data *d = &desc->irq_data;
1067        struct irq_chip *c = d->chip;
1068
1069        if (c->irq_release_resources)
1070                c->irq_release_resources(d);
1071}
1072
1073static int
1074setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1075{
1076        struct task_struct *t;
1077        struct sched_param param = {
1078                .sched_priority = MAX_USER_RT_PRIO/2,
1079        };
1080
1081        if (!secondary) {
1082                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1083                                   new->name);
1084        } else {
1085                t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1086                                   new->name);
1087                param.sched_priority -= 1;
1088        }
1089
1090        if (IS_ERR(t))
1091                return PTR_ERR(t);
1092
1093        sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1094
1095        /*
1096         * We keep the reference to the task struct even if
 1097         * the thread dies, so that the interrupt code never
1098         * references an already freed task_struct.
1099         */
1100        get_task_struct(t);
1101        new->thread = t;
1102        /*
1103         * Tell the thread to set its affinity. This is
1104         * important for shared interrupt handlers as we do
1105         * not invoke setup_affinity() for the secondary
1106         * handlers as everything is already set up. Even for
 1107         * interrupts marked with IRQF_NOBALANCING this is
1108         * correct as we want the thread to move to the cpu(s)
1109         * on which the requesting code placed the interrupt.
1110         */
1111        set_bit(IRQTF_AFFINITY, &new->thread_flags);
1112        return 0;
1113}
1114
1115/*
1116 * Internal function to register an irqaction - typically used to
1117 * allocate special interrupts that are part of the architecture.
1118 */
1119static int
1120__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1121{
1122        struct irqaction *old, **old_ptr;
1123        unsigned long flags, thread_mask = 0;
1124        int ret, nested, shared = 0;
1125        cpumask_var_t mask;
1126
1127        if (!desc)
1128                return -EINVAL;
1129
1130        if (desc->irq_data.chip == &no_irq_chip)
1131                return -ENOSYS;
1132        if (!try_module_get(desc->owner))
1133                return -ENODEV;
1134
1135        new->irq = irq;
1136
1137        /*
1138         * If the trigger type is not specified by the caller,
1139         * then use the default for this interrupt.
1140         */
1141        if (!(new->flags & IRQF_TRIGGER_MASK))
1142                new->flags |= irqd_get_trigger_type(&desc->irq_data);
1143
1144        /*
1145         * Check whether the interrupt nests into another interrupt
1146         * thread.
1147         */
1148        nested = irq_settings_is_nested_thread(desc);
1149        if (nested) {
1150                if (!new->thread_fn) {
1151                        ret = -EINVAL;
1152                        goto out_mput;
1153                }
1154                /*
 1155                 * Replace the primary handler, which the driver provided
 1156                 * for non-nested interrupt handling, with the dummy
 1157                 * function which warns when called.
1158                 */
1159                new->handler = irq_nested_primary_handler;
1160        } else {
1161                if (irq_settings_can_thread(desc)) {
1162                        ret = irq_setup_forced_threading(new);
1163                        if (ret)
1164                                goto out_mput;
1165                }
1166        }
1167
1168        /*
1169         * Create a handler thread when a thread function is supplied
1170         * and the interrupt does not nest into another interrupt
1171         * thread.
1172         */
1173        if (new->thread_fn && !nested) {
1174                ret = setup_irq_thread(new, irq, false);
1175                if (ret)
1176                        goto out_mput;
1177                if (new->secondary) {
1178                        ret = setup_irq_thread(new->secondary, irq, true);
1179                        if (ret)
1180                                goto out_thread;
1181                }
1182        }
1183
1184        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1185                ret = -ENOMEM;
1186                goto out_thread;
1187        }
1188
1189        /*
1190         * Drivers are often written to work w/o knowledge about the
1191         * underlying irq chip implementation, so a request for a
1192         * threaded irq without a primary hard irq context handler
1193         * requires the ONESHOT flag to be set. Some irq chips like
1194         * MSI based interrupts are per se one shot safe. Check the
1195         * chip flags, so we can avoid the unmask dance at the end of
1196         * the threaded handler for those.
1197         */
1198        if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1199                new->flags &= ~IRQF_ONESHOT;
1200
1201        /*
1202         * The following block of code has to be executed atomically
1203         */
1204        raw_spin_lock_irqsave(&desc->lock, flags);
1205        old_ptr = &desc->action;
1206        old = *old_ptr;
1207        if (old) {
1208                /*
1209                 * Can't share interrupts unless both agree to and are
1210                 * the same type (level, edge, polarity). So both flag
1211                 * fields must have IRQF_SHARED set and the bits which
1212                 * set the trigger type must match. Also all must
1213                 * agree on ONESHOT.
1214                 */
1215                if (!((old->flags & new->flags) & IRQF_SHARED) ||
1216                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1217                    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1218                        goto mismatch;
1219
1220                /* All handlers must agree on per-cpuness */
1221                if ((old->flags & IRQF_PERCPU) !=
1222                    (new->flags & IRQF_PERCPU))
1223                        goto mismatch;
1224
1225                /* add new interrupt at end of irq queue */
1226                do {
1227                        /*
 1228                         * OR all existing action->thread_mask bits,
1229                         * so we can find the next zero bit for this
1230                         * new action.
1231                         */
1232                        thread_mask |= old->thread_mask;
1233                        old_ptr = &old->next;
1234                        old = *old_ptr;
1235                } while (old);
1236                shared = 1;
1237        }
1238
1239        /*
1240         * Setup the thread mask for this irqaction for ONESHOT. For
1241         * !ONESHOT irqs the thread mask is 0 so we can avoid a
1242         * conditional in irq_wake_thread().
1243         */
1244        if (new->flags & IRQF_ONESHOT) {
1245                /*
 1246                 * Unlikely to have 32 (or 64) irqs sharing one line,
1247                 * but who knows.
1248                 */
1249                if (thread_mask == ~0UL) {
1250                        ret = -EBUSY;
1251                        goto out_mask;
1252                }
1253                /*
1254                 * The thread_mask for the action is or'ed to
 1255                 * desc->threads_oneshot to indicate that the
1256                 * IRQF_ONESHOT thread handler has been woken, but not
1257                 * yet finished. The bit is cleared when a thread
1258                 * completes. When all threads of a shared interrupt
 1259                 * line have completed desc->threads_oneshot becomes
1260                 * zero and the interrupt line is unmasked. See
1261                 * handle.c:irq_wake_thread() for further information.
1262                 *
1263                 * If no thread is woken by primary (hard irq context)
 1264                 * interrupt handlers, then desc->threads_oneshot is
1265                 * also checked for zero to unmask the irq line in the
1266                 * affected hard irq flow handlers
1267                 * (handle_[fasteoi|level]_irq).
1268                 *
1269                 * The new action gets the first zero bit of
1270                 * thread_mask assigned. See the loop above which or's
1271                 * all existing action->thread_mask bits.
1272                 */
1273                new->thread_mask = 1 << ffz(thread_mask);
1274
1275        } else if (new->handler == irq_default_primary_handler &&
1276                   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1277                /*
1278                 * The interrupt was requested with handler = NULL, so
1279                 * we use the default primary handler for it. But it
1280                 * does not have the oneshot flag set. In combination
1281                 * with level interrupts this is deadly, because the
1282                 * default primary handler just wakes the thread, then
 1283                 * the irq line is reenabled, but the device still
1284                 * has the level irq asserted. Rinse and repeat....
1285                 *
1286                 * While this works for edge type interrupts, we play
1287                 * it safe and reject unconditionally because we can't
1288                 * say for sure which type this interrupt really
1289                 * has. The type flags are unreliable as the
1290                 * underlying chip implementation can override them.
1291                 */
1292                pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1293                       irq);
1294                ret = -EINVAL;
1295                goto out_mask;
1296        }
1297
1298        if (!shared) {
1299                ret = irq_request_resources(desc);
1300                if (ret) {
1301                        pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1302                               new->name, irq, desc->irq_data.chip->name);
1303                        goto out_mask;
1304                }
1305
1306                init_waitqueue_head(&desc->wait_for_threads);
1307
1308                /* Setup the type (level, edge polarity) if configured: */
1309                if (new->flags & IRQF_TRIGGER_MASK) {
1310                        ret = __irq_set_trigger(desc,
1311                                                new->flags & IRQF_TRIGGER_MASK);
1312
1313                        if (ret)
1314                                goto out_mask;
1315                }
1316
1317                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1318                                  IRQS_ONESHOT | IRQS_WAITING);
1319                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1320
1321                if (new->flags & IRQF_PERCPU) {
1322                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
1323                        irq_settings_set_per_cpu(desc);
1324                }
1325
1326                if (new->flags & IRQF_ONESHOT)
1327                        desc->istate |= IRQS_ONESHOT;
1328
1329                if (irq_settings_can_autoenable(desc))
1330                        irq_startup(desc, true);
1331                else
1332                        /* Undo nested disables: */
1333                        desc->depth = 1;
1334
1335                /* Exclude IRQ from balancing if requested */
1336                if (new->flags & IRQF_NOBALANCING) {
1337                        irq_settings_set_no_balancing(desc);
1338                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1339                }
1340
1341                /* Set default affinity mask once everything is setup */
1342                setup_affinity(desc, mask);
1343
1344        } else if (new->flags & IRQF_TRIGGER_MASK) {
1345                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1346                unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1347
1348                if (nmsk != omsk)
 1349                        /* hope the handler works with the current trigger mode */
1350                        pr_warn("irq %d uses trigger mode %u; requested %u\n",
1351                                irq, omsk, nmsk);
1352        }
1353
1354        *old_ptr = new;
1355
1356        irq_pm_install_action(desc, new);
1357
1358        /* Reset broken irq detection when installing new handler */
1359        desc->irq_count = 0;
1360        desc->irqs_unhandled = 0;
1361
1362        /*
1363         * Check whether we disabled the irq via the spurious handler
1364         * before. Reenable it and give it another chance.
1365         */
1366        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1367                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1368                __enable_irq(desc);
1369        }
1370
1371        raw_spin_unlock_irqrestore(&desc->lock, flags);
1372
1373        /*
1374         * Strictly no need to wake it up, but hung_task complains
1375         * when no hard interrupt wakes the thread up.
1376         */
1377        if (new->thread)
1378                wake_up_process(new->thread);
1379        if (new->secondary)
1380                wake_up_process(new->secondary->thread);
1381
1382        register_irq_proc(irq, desc);
1383        new->dir = NULL;
1384        register_handler_proc(irq, new);
1385        free_cpumask_var(mask);
1386
1387        return 0;
1388
1389mismatch:
1390        if (!(new->flags & IRQF_PROBE_SHARED)) {
1391                pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1392                       irq, new->flags, new->name, old->flags, old->name);
1393#ifdef CONFIG_DEBUG_SHIRQ
1394                dump_stack();
1395#endif
1396        }
1397        ret = -EBUSY;
1398
1399out_mask:
1400        raw_spin_unlock_irqrestore(&desc->lock, flags);
1401        free_cpumask_var(mask);
1402
1403out_thread:
1404        if (new->thread) {
1405                struct task_struct *t = new->thread;
1406
1407                new->thread = NULL;
1408                kthread_stop(t);
1409                put_task_struct(t);
1410        }
1411        if (new->secondary && new->secondary->thread) {
1412                struct task_struct *t = new->secondary->thread;
1413
1414                new->secondary->thread = NULL;
1415                kthread_stop(t);
1416                put_task_struct(t);
1417        }
1418out_mput:
1419        module_put(desc->owner);
1420        return ret;
1421}
1422
1423/**
1424 *      setup_irq - setup an interrupt
1425 *      @irq: Interrupt line to setup
1426 *      @act: irqaction for the interrupt
1427 *
1428 * Used to statically setup interrupts in the early boot process.
1429 */
1430int setup_irq(unsigned int irq, struct irqaction *act)
1431{
1432        int retval;
1433        struct irq_desc *desc = irq_to_desc(irq);
1434
1435        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1436                return -EINVAL;
1437
1438        retval = irq_chip_pm_get(&desc->irq_data);
1439        if (retval < 0)
1440                return retval;
1441
1442        chip_bus_lock(desc);
1443        retval = __setup_irq(irq, desc, act);
1444        chip_bus_sync_unlock(desc);
1445
1446        if (retval)
1447                irq_chip_pm_put(&desc->irq_data);
1448
1449        return retval;
1450}
1451EXPORT_SYMBOL_GPL(setup_irq);
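/*
 * Usage sketch (editor's addition; the names are hypothetical):
 * architecture code registering a timer interrupt at early boot with a
 * statically allocated irqaction:
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "foo_timer",
 *	};
 *
 *	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 */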
1452
1453/*
1454 * Internal function to unregister an irqaction - used to free
1455 * regular and special interrupts that are part of the architecture.
1456 */
1457static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1458{
1459        struct irq_desc *desc = irq_to_desc(irq);
1460        struct irqaction *action, **action_ptr;
1461        unsigned long flags;
1462
1463        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1464
1465        if (!desc)
1466                return NULL;
1467
1468        chip_bus_lock(desc);
1469        raw_spin_lock_irqsave(&desc->lock, flags);
1470
1471        /*
1472         * There can be multiple actions per IRQ descriptor, find the right
1473         * one based on the dev_id:
1474         */
1475        action_ptr = &desc->action;
1476        for (;;) {
1477                action = *action_ptr;
1478
1479                if (!action) {
1480                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
1481                        raw_spin_unlock_irqrestore(&desc->lock, flags);
1482                        chip_bus_sync_unlock(desc);
1483                        return NULL;
1484                }
1485
1486                if (action->dev_id == dev_id)
1487                        break;
1488                action_ptr = &action->next;
1489        }
1490
1491        /* Found it - now remove it from the list of entries: */
1492        *action_ptr = action->next;
1493
1494        irq_pm_remove_action(desc, action);
1495
1496        /* If this was the last handler, shut down the IRQ line: */
1497        if (!desc->action) {
1498                irq_settings_clr_disable_unlazy(desc);
1499                irq_shutdown(desc);
1500                irq_release_resources(desc);
1501        }
1502
1503#ifdef CONFIG_SMP
1504        /* make sure affinity_hint is cleaned up */
1505        if (WARN_ON_ONCE(desc->affinity_hint))
1506                desc->affinity_hint = NULL;
1507#endif
1508
1509        raw_spin_unlock_irqrestore(&desc->lock, flags);
1510        chip_bus_sync_unlock(desc);
1511
1512        unregister_handler_proc(irq, action);
1513
1514        /* Make sure it's not being used on another CPU: */
1515        synchronize_irq(irq);
1516
1517#ifdef CONFIG_DEBUG_SHIRQ
1518        /*
1519         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
 1520         * event to happen even now that it's being freed, so let's make sure that
1521         * is so by doing an extra call to the handler ....
1522         *
1523         * ( We do this after actually deregistering it, to make sure that a
 1524         *   'real' IRQ doesn't run in parallel with our fake. )
1525         */
1526        if (action->flags & IRQF_SHARED) {
1527                local_irq_save(flags);
1528                action->handler(irq, dev_id);
1529                local_irq_restore(flags);
1530        }
1531#endif
1532
1533        if (action->thread) {
1534                kthread_stop(action->thread);
1535                put_task_struct(action->thread);
1536                if (action->secondary && action->secondary->thread) {
1537                        kthread_stop(action->secondary->thread);
1538                        put_task_struct(action->secondary->thread);
1539                }
1540        }
1541
1542        irq_chip_pm_put(&desc->irq_data);
1543        module_put(desc->owner);
1544        kfree(action->secondary);
1545        return action;
1546}
1547
1548/**
1549 *      remove_irq - free an interrupt
1550 *      @irq: Interrupt line to free
1551 *      @act: irqaction for the interrupt
1552 *
1553 * Used to remove interrupts statically set up by the early boot process.
1554 */
1555void remove_irq(unsigned int irq, struct irqaction *act)
1556{
1557        struct irq_desc *desc = irq_to_desc(irq);
1558
1559        if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1560            __free_irq(irq, act->dev_id);
1561}
1562EXPORT_SYMBOL_GPL(remove_irq);
1563
1564/**
1565 *      free_irq - free an interrupt allocated with request_irq
1566 *      @irq: Interrupt line to free
1567 *      @dev_id: Device identity to free
1568 *
1569 *      Remove an interrupt handler. The handler is removed and if the
1570 *      interrupt line is no longer in use by any driver it is disabled.
1571 *      On a shared IRQ the caller must ensure the interrupt is disabled
1572 *      on the card it drives before calling this function. The function
1573 *      does not return until any executing interrupts for this IRQ
1574 *      have completed.
1575 *
1576 *      This function must not be called from interrupt context.
1577 */
1578void free_irq(unsigned int irq, void *dev_id)
1579{
1580        struct irq_desc *desc = irq_to_desc(irq);
1581
1582        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1583                return;
1584
1585#ifdef CONFIG_SMP
1586        if (WARN_ON(desc->affinity_notify))
1587                desc->affinity_notify = NULL;
1588#endif
1589
1590        kfree(__free_irq(irq, dev_id));
1591}
1592EXPORT_SYMBOL(free_irq);
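/*
 * Illustrative teardown sketch (not part of this file), assuming a
 * hypothetical driver with a struct my_dev that was passed as dev_id
 * to request_irq() and a my_dev_mask_irqs() helper:
 *
 *	static void my_dev_teardown(struct my_dev *dev)
 *	{
 *		// On a shared line, stop the card from raising the
 *		// interrupt first, as required above.
 *		my_dev_mask_irqs(dev);
 *
 *		// free_irq() waits for handlers in flight and shuts the
 *		// line down if no other driver still uses it.
 *		free_irq(dev->irq, dev);
 *	}
 */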
1593
1594/**
1595 *      request_threaded_irq - allocate an interrupt line
1596 *      @irq: Interrupt line to allocate
1597 *      @handler: Function to be called when the IRQ occurs.
1598 *                Primary handler for threaded interrupts
1599 *                If NULL and thread_fn != NULL the default
1600 *                primary handler is installed
1601 *      @thread_fn: Function called from the irq handler thread
1602 *                  If NULL, no irq thread is created
1603 *      @irqflags: Interrupt type flags
1604 *      @devname: An ascii name for the claiming device
1605 *      @dev_id: A cookie passed back to the handler function
1606 *
1607 *      This call allocates interrupt resources and enables the
1608 *      interrupt line and IRQ handling. From the point this
1609 *      call is made your handler function may be invoked. Since
1610 *      your handler function must clear any interrupt the board
1611 *      raises, you must take care both to initialise your hardware
1612 *      and to set up the interrupt handler in the right order.
1613 *
1614 *      If you want to set up a threaded irq handler for your device
1615 *      then you need to supply @handler and @thread_fn. @handler is
1616 *      still called in hard interrupt context and has to check
1617 *      whether the interrupt originates from the device. If yes it
1618 *      needs to disable the interrupt on the device and return
1619 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1620 *      @thread_fn. This split handler design is necessary to support
1621 *      shared interrupts.
1622 *
1623 *      Dev_id must be globally unique. Normally the address of the
1624 *      device data structure is used as the cookie. Since the handler
1625 *      receives this value it makes sense to use it.
1626 *
1627 *      If your interrupt is shared you must pass a non-NULL dev_id
1628 *      as this is required when freeing the interrupt.
1629 *
1630 *      Flags:
1631 *
1632 *      IRQF_SHARED             Interrupt is shared
1633 *      IRQF_TRIGGER_*          Specify active edge(s) or level
1634 *
1635 */
1636int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1637                         irq_handler_t thread_fn, unsigned long irqflags,
1638                         const char *devname, void *dev_id)
1639{
1640        struct irqaction *action;
1641        struct irq_desc *desc;
1642        int retval;
1643
1644        if (irq == IRQ_NOTCONNECTED)
1645                return -ENOTCONN;
1646
1647        /*
1648         * Sanity-check: shared interrupts must pass in a real dev-ID,
1649         * otherwise we'll have trouble later trying to figure out
1650         * which interrupt is which (messes up the interrupt freeing
1651         * logic etc).
1652         *
1653         * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1654         * it cannot be set along with IRQF_NO_SUSPEND.
1655         */
1656        if (((irqflags & IRQF_SHARED) && !dev_id) ||
1657            (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1658            ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1659                return -EINVAL;
1660
1661        desc = irq_to_desc(irq);
1662        if (!desc)
1663                return -EINVAL;
1664
1665        if (!irq_settings_can_request(desc) ||
1666            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1667                return -EINVAL;
1668
1669        if (!handler) {
1670                if (!thread_fn)
1671                        return -EINVAL;
1672                handler = irq_default_primary_handler;
1673        }
1674
1675        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1676        if (!action)
1677                return -ENOMEM;
1678
1679        action->handler = handler;
1680        action->thread_fn = thread_fn;
1681        action->flags = irqflags;
1682        action->name = devname;
1683        action->dev_id = dev_id;
1684
1685        retval = irq_chip_pm_get(&desc->irq_data);
1686        if (retval < 0) {
1687                kfree(action);
1688                return retval;
1689        }
1690
1691        chip_bus_lock(desc);
1692        retval = __setup_irq(irq, desc, action);
1693        chip_bus_sync_unlock(desc);
1694
1695        if (retval) {
1696                irq_chip_pm_put(&desc->irq_data);
1697                kfree(action->secondary);
1698                kfree(action);
1699        }
1700
1701#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1702        if (!retval && (irqflags & IRQF_SHARED)) {
1703                /*
1704                 * It's a shared IRQ -- the driver ought to be prepared for it
1705                 * to happen immediately, so let's make sure....
1706                 * We disable the irq to make sure that a 'real' IRQ doesn't
1707                 * run in parallel with our fake.
1708                 */
1709                unsigned long flags;
1710
1711                disable_irq(irq);
1712                local_irq_save(flags);
1713
1714                handler(irq, dev_id);
1715
1716                local_irq_restore(flags);
1717                enable_irq(irq);
1718        }
1719#endif
1720        return retval;
1721}
1722EXPORT_SYMBOL(request_threaded_irq);
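/*
 * Illustrative example (not part of this file) of the split handler
 * design described above.  struct my_dev and the helpers
 * my_dev_pending(), my_dev_mask() and my_dev_handle() are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		// Hard interrupt context: only check whether the interrupt
 *		// came from this device and silence it at the device level.
 *		if (!my_dev_pending(dev))
 *			return IRQ_NONE;
 *
 *		my_dev_mask(dev);
 *		return IRQ_WAKE_THREAD;	// wake the thread running my_thread_fn()
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		// Runs in a dedicated kernel thread; sleeping is allowed.
 *		my_dev_handle(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_dev_probe(struct my_dev *dev)
 *	{
 *		return request_threaded_irq(dev->irq, my_quick_check,
 *					    my_thread_fn, IRQF_SHARED,
 *					    "my_dev", dev);
 *	}
 */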
1723
1724/**
1725 *      request_any_context_irq - allocate an interrupt line
1726 *      @irq: Interrupt line to allocate
1727 *      @handler: Function to be called when the IRQ occurs.
1728 *                Threaded handler for threaded interrupts.
1729 *      @flags: Interrupt type flags
1730 *      @name: An ascii name for the claiming device
1731 *      @dev_id: A cookie passed back to the handler function
1732 *
1733 *      This call allocates interrupt resources and enables the
1734 *      interrupt line and IRQ handling. It selects either a
1735 *      hardirq or threaded handling method depending on the
1736 *      context.
1737 *
1738 *      On failure, it returns a negative value. On success,
1739 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1740 */
1741int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1742                            unsigned long flags, const char *name, void *dev_id)
1743{
1744        struct irq_desc *desc;
1745        int ret;
1746
1747        if (irq == IRQ_NOTCONNECTED)
1748                return -ENOTCONN;
1749
1750        desc = irq_to_desc(irq);
1751        if (!desc)
1752                return -EINVAL;
1753
1754        if (irq_settings_is_nested_thread(desc)) {
1755                ret = request_threaded_irq(irq, NULL, handler,
1756                                           flags, name, dev_id);
1757                return !ret ? IRQC_IS_NESTED : ret;
1758        }
1759
1760        ret = request_irq(irq, handler, flags, name, dev_id);
1761        return !ret ? IRQC_IS_HARDIRQ : ret;
1762}
1763EXPORT_SYMBOL_GPL(request_any_context_irq);
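/*
 * Illustrative use (not part of this file): a driver that does not know
 * whether its interrupt parent is a nested/threaded irqchip can let the
 * core pick the context.  my_handler is a hypothetical irq_handler_t.
 *
 *	static int my_probe(unsigned int irq, void *dev)
 *	{
 *		int ret;
 *
 *		ret = request_any_context_irq(irq, my_handler, 0,
 *					      "my_dev", dev);
 *		if (ret < 0)
 *			return ret;
 *
 *		// ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED here; most
 *		// callers only need to know that the request succeeded.
 *		return 0;
 *	}
 */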
1764
1765void enable_percpu_irq(unsigned int irq, unsigned int type)
1766{
1767        unsigned int cpu = smp_processor_id();
1768        unsigned long flags;
1769        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1770
1771        if (!desc)
1772                return;
1773
1774        /*
1775         * If the trigger type is not specified by the caller, then
1776         * use the default for this interrupt.
1777         */
1778        type &= IRQ_TYPE_SENSE_MASK;
1779        if (type == IRQ_TYPE_NONE)
1780                type = irqd_get_trigger_type(&desc->irq_data);
1781
1782        if (type != IRQ_TYPE_NONE) {
1783                int ret;
1784
1785                ret = __irq_set_trigger(desc, type);
1786
1787                if (ret) {
1788                        WARN(1, "failed to set type for IRQ%d\n", irq);
1789                        goto out;
1790                }
1791        }
1792
1793        irq_percpu_enable(desc, cpu);
1794out:
1795        irq_put_desc_unlock(desc, flags);
1796}
1797EXPORT_SYMBOL_GPL(enable_percpu_irq);
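/*
 * Illustrative sketch (not part of this file): per-CPU interrupts are
 * commonly enabled from a CPU hotplug "starting" callback so that each
 * CPU enables its own copy of the line.  my_irq is a hypothetical
 * file-scope variable set up elsewhere.
 *
 *	static unsigned int my_irq;	// hypothetical, assigned at probe time
 *
 *	static int my_cpu_starting(unsigned int cpu)
 *	{
 *		enable_percpu_irq(my_irq, IRQ_TYPE_NONE);  // keep default trigger
 *		return 0;
 *	}
 */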
1798
1799/**
1800 * irq_percpu_is_enabled - Check whether the per-CPU irq is enabled
1801 * @irq:        Linux irq number to check for
1802 *
1803 * Must be called from a non-migratable context. Returns the enable
1804 * state of a per-CPU interrupt on the current CPU.
1805 */
1806bool irq_percpu_is_enabled(unsigned int irq)
1807{
1808        unsigned int cpu = smp_processor_id();
1809        struct irq_desc *desc;
1810        unsigned long flags;
1811        bool is_enabled;
1812
1813        desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1814        if (!desc)
1815                return false;
1816
1817        is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1818        irq_put_desc_unlock(desc, flags);
1819
1820        return is_enabled;
1821}
1822EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
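/*
 * Illustrative check (not part of this file): the caller has to stay on
 * one CPU, e.g. by disabling preemption, while asking about its copy of
 * the line.
 *
 *	static bool my_local_irq_enabled(unsigned int irq)
 *	{
 *		bool enabled;
 *
 *		preempt_disable();
 *		enabled = irq_percpu_is_enabled(irq);
 *		preempt_enable();
 *
 *		return enabled;
 *	}
 */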
1823
1824void disable_percpu_irq(unsigned int irq)
1825{
1826        unsigned int cpu = smp_processor_id();
1827        unsigned long flags;
1828        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1829
1830        if (!desc)
1831                return;
1832
1833        irq_percpu_disable(desc, cpu);
1834        irq_put_desc_unlock(desc, flags);
1835}
1836EXPORT_SYMBOL_GPL(disable_percpu_irq);
1837
1838/*
1839 * Internal function to unregister a percpu irqaction.
1840 */
1841static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1842{
1843        struct irq_desc *desc = irq_to_desc(irq);
1844        struct irqaction *action;
1845        unsigned long flags;
1846
1847        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1848
1849        if (!desc)
1850                return NULL;
1851
1852        raw_spin_lock_irqsave(&desc->lock, flags);
1853
1854        action = desc->action;
1855        if (!action || action->percpu_dev_id != dev_id) {
1856                WARN(1, "Trying to free already-free IRQ %d\n", irq);
1857                goto bad;
1858        }
1859
1860        if (!cpumask_empty(desc->percpu_enabled)) {
1861                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1862                     irq, cpumask_first(desc->percpu_enabled));
1863                goto bad;
1864        }
1865
1866        /* Found it - now remove it from the list of entries: */
1867        desc->action = NULL;
1868
1869        raw_spin_unlock_irqrestore(&desc->lock, flags);
1870
1871        unregister_handler_proc(irq, action);
1872
1873        irq_chip_pm_put(&desc->irq_data);
1874        module_put(desc->owner);
1875        return action;
1876
1877bad:
1878        raw_spin_unlock_irqrestore(&desc->lock, flags);
1879        return NULL;
1880}
1881
1882/**
1883 *      remove_percpu_irq - free a per-cpu interrupt
1884 *      @irq: Interrupt line to free
1885 *      @act: irqaction for the interrupt
1886 *
1887 * Used to remove interrupts statically set up by the early boot process.
1888 */
1889void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1890{
1891        struct irq_desc *desc = irq_to_desc(irq);
1892
1893        if (desc && irq_settings_is_per_cpu_devid(desc))
1894            __free_percpu_irq(irq, act->percpu_dev_id);
1895}
1896
1897/**
1898 *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1899 *      @irq: Interrupt line to free
1900 *      @dev_id: Device identity to free
1901 *
1902 *      Remove a percpu interrupt handler. The handler is removed, but
1903 *      the interrupt line is not disabled. This must be done on each
1904 *      CPU before calling this function. The function does not return
1905 *      until any executing interrupts for this IRQ have completed.
1906 *
1907 *      This function must not be called from interrupt context.
1908 */
1909void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1910{
1911        struct irq_desc *desc = irq_to_desc(irq);
1912
1913        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1914                return;
1915
1916        chip_bus_lock(desc);
1917        kfree(__free_percpu_irq(irq, dev_id));
1918        chip_bus_sync_unlock(desc);
1919}
1920EXPORT_SYMBOL_GPL(free_percpu_irq);
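/*
 * Illustrative teardown sketch (not part of this file): each CPU first
 * disables its copy of the line, here from a hypothetical CPU hotplug
 * "dying" callback, before the single free_percpu_irq() call.  my_irq,
 * my_hp_state and my_percpu_dev are hypothetical.
 *
 *	static int my_cpu_dying(unsigned int cpu)
 *	{
 *		disable_percpu_irq(my_irq);
 *		return 0;
 *	}
 *
 *	static void my_driver_exit(void)
 *	{
 *		// removing the hotplug state runs my_cpu_dying() on each
 *		// online CPU (assuming it was registered as the teardown
 *		// callback of a dynamically allocated state)
 *		cpuhp_remove_state(my_hp_state);
 *		free_percpu_irq(my_irq, &my_percpu_dev);
 *	}
 */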
1921
1922/**
1923 *      setup_percpu_irq - setup a per-cpu interrupt
1924 *      @irq: Interrupt line to setup
1925 *      @act: irqaction for the interrupt
1926 *
1927 * Used to statically set up per-cpu interrupts in the early boot process.
1928 */
1929int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1930{
1931        struct irq_desc *desc = irq_to_desc(irq);
1932        int retval;
1933
1934        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1935                return -EINVAL;
1936
1937        retval = irq_chip_pm_get(&desc->irq_data);
1938        if (retval < 0)
1939                return retval;
1940
1941        chip_bus_lock(desc);
1942        retval = __setup_irq(irq, desc, act);
1943        chip_bus_sync_unlock(desc);
1944
1945        if (retval)
1946                irq_chip_pm_put(&desc->irq_data);
1947
1948        return retval;
1949}
1950
1951/**
1952 *      request_percpu_irq - allocate a percpu interrupt line
1953 *      @irq: Interrupt line to allocate
1954 *      @handler: Function to be called when the IRQ occurs.
1955 *      @devname: An ascii name for the claiming device
1956 *      @dev_id: A percpu cookie passed back to the handler function
1957 *
1958 *      This call allocates interrupt resources and enables the
1959 *      interrupt on the local CPU. If the interrupt is supposed to be
1960 *      enabled on other CPUs, it has to be done on each CPU using
1961 *      enable_percpu_irq().
1962 *
1963 *      Dev_id must be globally unique. It is a per-cpu variable, and
1964 *      the handler gets called with the interrupted CPU's instance of
1965 *      that variable.
1966 */
1967int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1968                       const char *devname, void __percpu *dev_id)
1969{
1970        struct irqaction *action;
1971        struct irq_desc *desc;
1972        int retval;
1973
1974        if (!dev_id)
1975                return -EINVAL;
1976
1977        desc = irq_to_desc(irq);
1978        if (!desc || !irq_settings_can_request(desc) ||
1979            !irq_settings_is_per_cpu_devid(desc))
1980                return -EINVAL;
1981
1982        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1983        if (!action)
1984                return -ENOMEM;
1985
1986        action->handler = handler;
1987        action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1988        action->name = devname;
1989        action->percpu_dev_id = dev_id;
1990
1991        retval = irq_chip_pm_get(&desc->irq_data);
1992        if (retval < 0) {
1993                kfree(action);
1994                return retval;
1995        }
1996
1997        chip_bus_lock(desc);
1998        retval = __setup_irq(irq, desc, action);
1999        chip_bus_sync_unlock(desc);
2000
2001        if (retval) {
2002                irq_chip_pm_put(&desc->irq_data);
2003                kfree(action);
2004        }
2005
2006        return retval;
2007}
2008EXPORT_SYMBOL_GPL(request_percpu_irq);
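/*
 * Illustrative example (not part of this file), assuming a hypothetical
 * per-CPU device structure.  As described above, the handler receives
 * the interrupted CPU's instance of the per-CPU cookie as dev_id.
 *
 *	static DEFINE_PER_CPU(struct my_percpu_dev, my_percpu_dev);
 *
 *	static irqreturn_t my_percpu_handler(int irq, void *dev_id)
 *	{
 *		struct my_percpu_dev *dev = dev_id;	// this CPU's instance
 *
 *		// handle the interrupt for the local CPU
 *		return IRQ_HANDLED;
 *	}
 *
 *	static int my_percpu_init(unsigned int irq)
 *	{
 *		int ret;
 *
 *		ret = request_percpu_irq(irq, my_percpu_handler, "my_dev",
 *					 &my_percpu_dev);
 *		if (ret)
 *			return ret;
 *
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// local CPU only
 *		return 0;
 *	}
 */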
2009
2010/**
2011 *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2012 *      @irq: Interrupt line that is forwarded to a VM
2013 *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2014 *      @state: a pointer to a boolean where the state is to be stored
2015 *
2016 *      This call snapshots the internal irqchip state of an
2017 *      interrupt, returning into @state the bit corresponding to
2018 *      state @which.
2019 *
2020 *      This function should be called with preemption disabled if the
2021 *      interrupt controller has per-cpu registers.
2022 */
2023int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2024                          bool *state)
2025{
2026        struct irq_desc *desc;
2027        struct irq_data *data;
2028        struct irq_chip *chip;
2029        unsigned long flags;
2030        int err = -EINVAL;
2031
2032        desc = irq_get_desc_buslock(irq, &flags, 0);
2033        if (!desc)
2034                return err;
2035
2036        data = irq_desc_get_irq_data(desc);
2037
2038        do {
2039                chip = irq_data_get_irq_chip(data);
2040                if (chip->irq_get_irqchip_state)
2041                        break;
2042#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2043                data = data->parent_data;
2044#else
2045                data = NULL;
2046#endif
2047        } while (data);
2048
2049        if (data)
2050                err = chip->irq_get_irqchip_state(data, which, state);
2051
2052        irq_put_desc_busunlock(desc, flags);
2053        return err;
2054}
2055EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
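/*
 * Illustrative use (not part of this file): snapshot whether a line
 * forwarded to a guest is currently pending, e.g. before saving the
 * VM's interrupt state.
 *
 *	static void my_save_pending(unsigned int irq)
 *	{
 *		bool pending;
 *
 *		// returns 0 on success, a negative errno otherwise
 *		if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *					   &pending))
 *			pr_debug("IRQ %u pending: %d\n", irq, pending);
 *	}
 */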
2056
2057/**
2058 *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2059 *      @irq: Interrupt line that is forwarded to a VM
2060 *      @which: State to be restored (one of IRQCHIP_STATE_*)
2061 *      @val: Value corresponding to @which
2062 *
2063 *      This call sets the internal irqchip state of an interrupt,
2064 *      depending on the value of @which.
2065 *
2066 *      This function should be called with preemption disabled if the
2067 *      interrupt controller has per-cpu registers.
2068 */
2069int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2070                          bool val)
2071{
2072        struct irq_desc *desc;
2073        struct irq_data *data;
2074        struct irq_chip *chip;
2075        unsigned long flags;
2076        int err = -EINVAL;
2077
2078        desc = irq_get_desc_buslock(irq, &flags, 0);
2079        if (!desc)
2080                return err;
2081
2082        data = irq_desc_get_irq_data(desc);
2083
2084        do {
2085                chip = irq_data_get_irq_chip(data);
2086                if (chip->irq_set_irqchip_state)
2087                        break;
2088#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2089                data = data->parent_data;
2090#else
2091                data = NULL;
2092#endif
2093        } while (data);
2094
2095        if (data)
2096                err = chip->irq_set_irqchip_state(data, which, val);
2097
2098        irq_put_desc_busunlock(desc, flags);
2099        return err;
2100}
2101EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
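/*
 * Illustrative counterpart (not part of this file): when the forwarded
 * interrupt is restored, e.g. on VM resume, the saved pending bit can
 * be written back into the irqchip.
 *
 *	static void my_restore_pending(unsigned int irq, bool saved_pending)
 *	{
 *		if (irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
 *					  saved_pending))
 *			pr_warn("failed to restore pending state of IRQ %u\n",
 *				irq);
 *	}
 */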
2102