   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
  10#define pr_fmt(fmt) "genirq: " fmt
  11
  12#include <linux/irq.h>
  13#include <linux/kthread.h>
  14#include <linux/module.h>
  15#include <linux/random.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/sched/rt.h>
  20#include <linux/sched/task.h>
  21#include <uapi/linux/sched/types.h>
  22#include <linux/task_work.h>
  23
  24#include "internals.h"
  25
  26#ifdef CONFIG_IRQ_FORCED_THREADING
  27__read_mostly bool force_irqthreads;
  28
  29static int __init setup_forced_irqthreads(char *arg)
  30{
  31        force_irqthreads = true;
  32        return 0;
  33}
  34early_param("threadirqs", setup_forced_irqthreads);
  35#endif
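/*
 * For reference: forced interrupt threading is selected at boot time by
 * passing "threadirqs" on the kernel command line (handled by the
 * early_param() above); there is no runtime switch.
 */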
  36
  37static void __synchronize_hardirq(struct irq_desc *desc)
  38{
  39        bool inprogress;
  40
  41        do {
  42                unsigned long flags;
  43
  44                /*
  45                 * Wait until we're out of the critical section.  This might
  46                 * give the wrong answer due to the lack of memory barriers.
  47                 */
  48                while (irqd_irq_inprogress(&desc->irq_data))
  49                        cpu_relax();
  50
  51                /* Ok, that indicated we're done: double-check carefully. */
  52                raw_spin_lock_irqsave(&desc->lock, flags);
  53                inprogress = irqd_irq_inprogress(&desc->irq_data);
  54                raw_spin_unlock_irqrestore(&desc->lock, flags);
  55
  56                /* Oops, that failed? */
  57        } while (inprogress);
  58}
  59
  60/**
  61 *      synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  62 *      @irq: interrupt number to wait for
  63 *
  64 *      This function waits for any pending hard IRQ handlers for this
  65 *      interrupt to complete before returning. If you use this
   66 *      function while holding a resource the IRQ handler may need, you
  67 *      will deadlock. It does not take associated threaded handlers
  68 *      into account.
  69 *
  70 *      Do not use this for shutdown scenarios where you must be sure
  71 *      that all parts (hardirq and threaded handler) have completed.
  72 *
  73 *      Returns: false if a threaded handler is active.
  74 *
  75 *      This function may be called - with care - from IRQ context.
  76 */
  77bool synchronize_hardirq(unsigned int irq)
  78{
  79        struct irq_desc *desc = irq_to_desc(irq);
  80
  81        if (desc) {
  82                __synchronize_hardirq(desc);
  83                return !atomic_read(&desc->threads_active);
  84        }
  85
  86        return true;
  87}
  88EXPORT_SYMBOL(synchronize_hardirq);
  89
  90/**
  91 *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  92 *      @irq: interrupt number to wait for
  93 *
  94 *      This function waits for any pending IRQ handlers for this interrupt
  95 *      to complete before returning. If you use this function while
   96 *      holding a resource the IRQ handler may need, you will deadlock.
  97 *
  98 *      This function may be called - with care - from IRQ context.
  99 */
 100void synchronize_irq(unsigned int irq)
 101{
 102        struct irq_desc *desc = irq_to_desc(irq);
 103
 104        if (desc) {
 105                __synchronize_hardirq(desc);
 106                /*
 107                 * We made sure that no hardirq handler is
 108                 * running. Now verify that no threaded handlers are
 109                 * active.
 110                 */
 111                wait_event(desc->wait_for_threads,
 112                           !atomic_read(&desc->threads_active));
 113        }
 114}
 115EXPORT_SYMBOL(synchronize_irq);
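/*
 * Usage sketch (illustrative only, not part of this file): a typical driver
 * teardown path first stops the device from raising interrupts, then waits
 * for both the hardirq and any threaded handler before freeing shared state.
 * The "foo" structure and helpers below are hypothetical.
 *
 *	static void foo_shutdown(struct foo *foo)
 *	{
 *		foo_hw_mask_irqs(foo);		// stop new interrupts at the device
 *		synchronize_irq(foo->irq);	// wait for running handlers
 *		foo_free_buffers(foo);		// now safe to tear down shared state
 *	}
 */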
 116
 117#ifdef CONFIG_SMP
 118cpumask_var_t irq_default_affinity;
 119
 120static bool __irq_can_set_affinity(struct irq_desc *desc)
 121{
 122        if (!desc || !irqd_can_balance(&desc->irq_data) ||
 123            !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 124                return false;
 125        return true;
 126}
 127
 128/**
 129 *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 130 *      @irq:           Interrupt to check
 131 *
 132 */
 133int irq_can_set_affinity(unsigned int irq)
 134{
 135        return __irq_can_set_affinity(irq_to_desc(irq));
 136}
 137
 138/**
  139 * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
 140 * @irq:        Interrupt to check
 141 *
 142 * Like irq_can_set_affinity() above, but additionally checks for the
 143 * AFFINITY_MANAGED flag.
 144 */
 145bool irq_can_set_affinity_usr(unsigned int irq)
 146{
 147        struct irq_desc *desc = irq_to_desc(irq);
 148
 149        return __irq_can_set_affinity(desc) &&
 150                !irqd_affinity_is_managed(&desc->irq_data);
 151}
 152
 153/**
 154 *      irq_set_thread_affinity - Notify irq threads to adjust affinity
  155 *      @desc:          irq descriptor which has affinity changed
 156 *
 157 *      We just set IRQTF_AFFINITY and delegate the affinity setting
 158 *      to the interrupt thread itself. We can not call
 159 *      set_cpus_allowed_ptr() here as we hold desc->lock and this
 160 *      code can be called from hard interrupt context.
 161 */
 162void irq_set_thread_affinity(struct irq_desc *desc)
 163{
 164        struct irqaction *action;
 165
 166        for_each_action_of_desc(desc, action)
 167                if (action->thread)
 168                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
 169}
 170
 171#ifdef CONFIG_GENERIC_PENDING_IRQ
 172static inline bool irq_can_move_pcntxt(struct irq_data *data)
 173{
 174        return irqd_can_move_in_process_context(data);
 175}
 176static inline bool irq_move_pending(struct irq_data *data)
 177{
 178        return irqd_is_setaffinity_pending(data);
 179}
 180static inline void
 181irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 182{
 183        cpumask_copy(desc->pending_mask, mask);
 184}
 185static inline void
 186irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 187{
 188        cpumask_copy(mask, desc->pending_mask);
 189}
 190#else
 191static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 192static inline bool irq_move_pending(struct irq_data *data) { return false; }
 193static inline void
 194irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 195static inline void
 196irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 197#endif
 198
 199int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 200                        bool force)
 201{
 202        struct irq_desc *desc = irq_data_to_desc(data);
 203        struct irq_chip *chip = irq_data_get_irq_chip(data);
 204        int ret;
 205
 206        ret = chip->irq_set_affinity(data, mask, force);
 207        switch (ret) {
 208        case IRQ_SET_MASK_OK:
 209        case IRQ_SET_MASK_OK_DONE:
 210                cpumask_copy(desc->irq_common_data.affinity, mask);
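                /* fall through */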
 211        case IRQ_SET_MASK_OK_NOCOPY:
 212                irq_set_thread_affinity(desc);
 213                ret = 0;
 214        }
 215
 216        return ret;
 217}
 218
 219int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 220                            bool force)
 221{
 222        struct irq_chip *chip = irq_data_get_irq_chip(data);
 223        struct irq_desc *desc = irq_data_to_desc(data);
 224        int ret = 0;
 225
 226        if (!chip || !chip->irq_set_affinity)
 227                return -EINVAL;
 228
 229        if (irq_can_move_pcntxt(data)) {
 230                ret = irq_do_set_affinity(data, mask, force);
 231        } else {
 232                irqd_set_move_pending(data);
 233                irq_copy_pending(desc, mask);
 234        }
 235
 236        if (desc->affinity_notify) {
 237                kref_get(&desc->affinity_notify->kref);
 238                schedule_work(&desc->affinity_notify->work);
 239        }
 240        irqd_set(data, IRQD_AFFINITY_SET);
 241
 242        return ret;
 243}
 244
 245int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 246{
 247        struct irq_desc *desc = irq_to_desc(irq);
 248        unsigned long flags;
 249        int ret;
 250
 251        if (!desc)
 252                return -EINVAL;
 253
 254        raw_spin_lock_irqsave(&desc->lock, flags);
 255        ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 256        raw_spin_unlock_irqrestore(&desc->lock, flags);
 257        return ret;
 258}
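/*
 * Note: drivers normally reach this through the irq_set_affinity() and
 * irq_force_affinity() wrappers declared in <linux/interrupt.h>. A minimal,
 * illustrative sketch (the "queue" structure is hypothetical):
 *
 *	// steer a per-queue interrupt to the CPU that owns the queue
 *	ret = irq_set_affinity(queue->irq, cpumask_of(queue->cpu));
 *	if (ret)
 *		pr_warn("failed to set IRQ %u affinity: %d\n", queue->irq, ret);
 */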
 259
 260int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 261{
 262        unsigned long flags;
 263        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 264
 265        if (!desc)
 266                return -EINVAL;
 267        desc->affinity_hint = m;
 268        irq_put_desc_unlock(desc, flags);
 269        /* set the initial affinity to prevent every interrupt being on CPU0 */
 270        if (m)
 271                __irq_set_affinity(irq, m, false);
 272        return 0;
 273}
 274EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
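/*
 * Usage sketch (illustrative): multi-queue drivers typically publish one hint
 * per vector so that tools like irqbalance can honour it, and clear the hint
 * again before free_irq(). The names below are hypothetical.
 *
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(q[i].irq, cpumask_of(i % num_online_cpus()));
 *
 *	// ... and on teardown, before free_irq():
 *	for (i = 0; i < nr_queues; i++)
 *		irq_set_affinity_hint(q[i].irq, NULL);
 */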
 275
 276static void irq_affinity_notify(struct work_struct *work)
 277{
 278        struct irq_affinity_notify *notify =
 279                container_of(work, struct irq_affinity_notify, work);
 280        struct irq_desc *desc = irq_to_desc(notify->irq);
 281        cpumask_var_t cpumask;
 282        unsigned long flags;
 283
 284        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 285                goto out;
 286
 287        raw_spin_lock_irqsave(&desc->lock, flags);
 288        if (irq_move_pending(&desc->irq_data))
 289                irq_get_pending(cpumask, desc);
 290        else
 291                cpumask_copy(cpumask, desc->irq_common_data.affinity);
 292        raw_spin_unlock_irqrestore(&desc->lock, flags);
 293
 294        notify->notify(notify, cpumask);
 295
 296        free_cpumask_var(cpumask);
 297out:
 298        kref_put(&notify->kref, notify->release);
 299}
 300
 301/**
 302 *      irq_set_affinity_notifier - control notification of IRQ affinity changes
 303 *      @irq:           Interrupt for which to enable/disable notification
 304 *      @notify:        Context for notification, or %NULL to disable
 305 *                      notification.  Function pointers must be initialised;
 306 *                      the other fields will be initialised by this function.
 307 *
 308 *      Must be called in process context.  Notification may only be enabled
 309 *      after the IRQ is allocated and must be disabled before the IRQ is
 310 *      freed using free_irq().
 311 */
 312int
 313irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 314{
 315        struct irq_desc *desc = irq_to_desc(irq);
 316        struct irq_affinity_notify *old_notify;
 317        unsigned long flags;
 318
 319        /* The release function is promised process context */
 320        might_sleep();
 321
 322        if (!desc)
 323                return -EINVAL;
 324
 325        /* Complete initialisation of *notify */
 326        if (notify) {
 327                notify->irq = irq;
 328                kref_init(&notify->kref);
 329                INIT_WORK(&notify->work, irq_affinity_notify);
 330        }
 331
 332        raw_spin_lock_irqsave(&desc->lock, flags);
 333        old_notify = desc->affinity_notify;
 334        desc->affinity_notify = notify;
 335        raw_spin_unlock_irqrestore(&desc->lock, flags);
 336
 337        if (old_notify)
 338                kref_put(&old_notify->kref, old_notify->release);
 339
 340        return 0;
 341}
 342EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
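/*
 * Usage sketch (illustrative): a driver that caches the current affinity can
 * embed a notifier in its device structure. All "foo" names are hypothetical;
 * the callback signatures match struct irq_affinity_notify.
 *
 *	static void foo_affinity_notify(struct irq_affinity_notify *notify,
 *					const cpumask_t *mask)
 *	{
 *		struct foo *foo = container_of(notify, struct foo, notify);
 *
 *		cpumask_copy(&foo->irq_mask, mask);
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free: the notifier is embedded in struct foo
 *	}
 *
 *	foo->notify.notify = foo_affinity_notify;
 *	foo->notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->notify);
 *	...
 *	irq_set_affinity_notifier(foo->irq, NULL);	// before free_irq()
 */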
 343
 344#ifndef CONFIG_AUTO_IRQ_AFFINITY
 345/*
 346 * Generic version of the affinity autoselector.
 347 */
 348static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 349{
 350        struct cpumask *set = irq_default_affinity;
 351        int node = irq_desc_get_node(desc);
 352
 353        /* Excludes PER_CPU and NO_BALANCE interrupts */
 354        if (!__irq_can_set_affinity(desc))
 355                return 0;
 356
 357        /*
 358         * Preserve the managed affinity setting and a userspace affinity
 359         * setup, but make sure that one of the targets is online.
 360         */
 361        if (irqd_affinity_is_managed(&desc->irq_data) ||
 362            irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 363                if (cpumask_intersects(desc->irq_common_data.affinity,
 364                                       cpu_online_mask))
 365                        set = desc->irq_common_data.affinity;
 366                else
 367                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 368        }
 369
 370        cpumask_and(mask, cpu_online_mask, set);
 371        if (node != NUMA_NO_NODE) {
 372                const struct cpumask *nodemask = cpumask_of_node(node);
 373
 374                /* make sure at least one of the cpus in nodemask is online */
 375                if (cpumask_intersects(mask, nodemask))
 376                        cpumask_and(mask, mask, nodemask);
 377        }
 378        irq_do_set_affinity(&desc->irq_data, mask, false);
 379        return 0;
 380}
 381#else
 382/* Wrapper for ALPHA specific affinity selector magic */
 383static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
 384{
 385        return irq_select_affinity(irq_desc_get_irq(d));
 386}
 387#endif
 388
 389/*
 390 * Called when affinity is set via /proc/irq
 391 */
 392int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 393{
 394        struct irq_desc *desc = irq_to_desc(irq);
 395        unsigned long flags;
 396        int ret;
 397
 398        raw_spin_lock_irqsave(&desc->lock, flags);
 399        ret = setup_affinity(desc, mask);
 400        raw_spin_unlock_irqrestore(&desc->lock, flags);
 401        return ret;
 402}
 403
 404#else
 405static inline int
 406setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 407{
 408        return 0;
 409}
 410#endif
 411
 412/**
 413 *      irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 414 *      @irq: interrupt number to set affinity
 415 *      @vcpu_info: vCPU specific data
 416 *
 417 *      This function uses the vCPU specific data to set the vCPU
 418 *      affinity for an irq. The vCPU specific data is passed from
 419 *      outside, such as KVM. One example code path is as below:
 420 *      KVM -> IOMMU -> irq_set_vcpu_affinity().
 421 */
 422int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 423{
 424        unsigned long flags;
 425        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 426        struct irq_data *data;
 427        struct irq_chip *chip;
 428        int ret = -ENOSYS;
 429
 430        if (!desc)
 431                return -EINVAL;
 432
 433        data = irq_desc_get_irq_data(desc);
 434        chip = irq_data_get_irq_chip(data);
 435        if (chip && chip->irq_set_vcpu_affinity)
 436                ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 437        irq_put_desc_unlock(desc, flags);
 438
 439        return ret;
 440}
 441EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 442
 443void __disable_irq(struct irq_desc *desc)
 444{
 445        if (!desc->depth++)
 446                irq_disable(desc);
 447}
 448
 449static int __disable_irq_nosync(unsigned int irq)
 450{
 451        unsigned long flags;
 452        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 453
 454        if (!desc)
 455                return -EINVAL;
 456        __disable_irq(desc);
 457        irq_put_desc_busunlock(desc, flags);
 458        return 0;
 459}
 460
 461/**
 462 *      disable_irq_nosync - disable an irq without waiting
 463 *      @irq: Interrupt to disable
 464 *
 465 *      Disable the selected interrupt line.  Disables and Enables are
 466 *      nested.
 467 *      Unlike disable_irq(), this function does not ensure existing
 468 *      instances of the IRQ handler have completed before returning.
 469 *
 470 *      This function may be called from IRQ context.
 471 */
 472void disable_irq_nosync(unsigned int irq)
 473{
 474        __disable_irq_nosync(irq);
 475}
 476EXPORT_SYMBOL(disable_irq_nosync);
 477
 478/**
 479 *      disable_irq - disable an irq and wait for completion
 480 *      @irq: Interrupt to disable
 481 *
 482 *      Disable the selected interrupt line.  Enables and Disables are
 483 *      nested.
 484 *      This function waits for any pending IRQ handlers for this interrupt
 485 *      to complete before returning. If you use this function while
  486 *      holding a resource the IRQ handler may need, you will deadlock.
 487 *
 488 *      This function may be called - with care - from IRQ context.
 489 */
 490void disable_irq(unsigned int irq)
 491{
 492        if (!__disable_irq_nosync(irq))
 493                synchronize_irq(irq);
 494}
 495EXPORT_SYMBOL(disable_irq);
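/*
 * Usage sketch (illustrative): disable_irq()/enable_irq() nest, and the caller
 * must not hold any lock or resource the handler needs while waiting, or it
 * will deadlock. "foo" below is hypothetical.
 *
 *	disable_irq(foo->irq);		// waits for running handlers to finish
 *	foo_reprogram_hw(foo);		// the handler cannot run here
 *	enable_irq(foo->irq);
 */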
 496
 497/**
 498 *      disable_hardirq - disables an irq and waits for hardirq completion
 499 *      @irq: Interrupt to disable
 500 *
 501 *      Disable the selected interrupt line.  Enables and Disables are
 502 *      nested.
 503 *      This function waits for any pending hard IRQ handlers for this
 504 *      interrupt to complete before returning. If you use this function while
  505 *      holding a resource the hard IRQ handler may need, you will deadlock.
 506 *
 507 *      When used to optimistically disable an interrupt from atomic context
 508 *      the return value must be checked.
 509 *
 510 *      Returns: false if a threaded handler is active.
 511 *
 512 *      This function may be called - with care - from IRQ context.
 513 */
 514bool disable_hardirq(unsigned int irq)
 515{
 516        if (!__disable_irq_nosync(irq))
 517                return synchronize_hardirq(irq);
 518
 519        return false;
 520}
 521EXPORT_SYMBOL_GPL(disable_hardirq);
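/*
 * Usage sketch (illustrative): optimistic disable from atomic context, e.g.
 * to poll a device with its interrupt quiesced. The return value only tells
 * whether the threaded handler was idle; the irq (assumed valid here) must be
 * re-enabled either way. "foo" is hypothetical.
 *
 *	bool quiesced = disable_hardirq(foo->irq);
 *
 *	if (quiesced)
 *		foo_poll(foo);		// no hardirq and no irq thread running
 *	enable_irq(foo->irq);
 */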
 522
 523void __enable_irq(struct irq_desc *desc)
 524{
 525        switch (desc->depth) {
 526        case 0:
 527 err_out:
 528                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 529                     irq_desc_get_irq(desc));
 530                break;
 531        case 1: {
 532                if (desc->istate & IRQS_SUSPENDED)
 533                        goto err_out;
 534                /* Prevent probing on this irq: */
 535                irq_settings_set_noprobe(desc);
 536                irq_enable(desc);
 537                check_irq_resend(desc);
 538                /* fall-through */
 539        }
 540        default:
 541                desc->depth--;
 542        }
 543}
 544
 545/**
 546 *      enable_irq - enable handling of an irq
 547 *      @irq: Interrupt to enable
 548 *
 549 *      Undoes the effect of one call to disable_irq().  If this
 550 *      matches the last disable, processing of interrupts on this
 551 *      IRQ line is re-enabled.
 552 *
 553 *      This function may be called from IRQ context only when
  554 *      desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
 555 */
 556void enable_irq(unsigned int irq)
 557{
 558        unsigned long flags;
 559        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 560
 561        if (!desc)
 562                return;
 563        if (WARN(!desc->irq_data.chip,
 564                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 565                goto out;
 566
 567        __enable_irq(desc);
 568out:
 569        irq_put_desc_busunlock(desc, flags);
 570}
 571EXPORT_SYMBOL(enable_irq);
 572
 573static int set_irq_wake_real(unsigned int irq, unsigned int on)
 574{
 575        struct irq_desc *desc = irq_to_desc(irq);
 576        int ret = -ENXIO;
 577
  578        if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
 579                return 0;
 580
 581        if (desc->irq_data.chip->irq_set_wake)
 582                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 583
 584        return ret;
 585}
 586
 587/**
 588 *      irq_set_irq_wake - control irq power management wakeup
 589 *      @irq:   interrupt to control
 590 *      @on:    enable/disable power management wakeup
 591 *
 592 *      Enable/disable power management wakeup mode, which is
 593 *      disabled by default.  Enables and disables must match,
 594 *      just as they match for non-wakeup mode support.
 595 *
 596 *      Wakeup mode lets this IRQ wake the system from sleep
 597 *      states like "suspend to RAM".
 598 */
 599int irq_set_irq_wake(unsigned int irq, unsigned int on)
 600{
 601        unsigned long flags;
 602        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 603        int ret = 0;
 604
 605        if (!desc)
 606                return -EINVAL;
 607
 608        /* wakeup-capable irqs can be shared between drivers that
 609         * don't need to have the same sleep mode behaviors.
 610         */
 611        if (on) {
 612                if (desc->wake_depth++ == 0) {
 613                        ret = set_irq_wake_real(irq, on);
 614                        if (ret)
 615                                desc->wake_depth = 0;
 616                        else
 617                                irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 618                }
 619        } else {
 620                if (desc->wake_depth == 0) {
 621                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 622                } else if (--desc->wake_depth == 0) {
 623                        ret = set_irq_wake_real(irq, on);
 624                        if (ret)
 625                                desc->wake_depth = 1;
 626                        else
 627                                irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 628                }
 629        }
 630        irq_put_desc_busunlock(desc, flags);
 631        return ret;
 632}
 633EXPORT_SYMBOL(irq_set_irq_wake);
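/*
 * Usage sketch (illustrative): drivers usually call this through the
 * enable_irq_wake()/disable_irq_wake() wrappers from their suspend and resume
 * callbacks. The "foo" driver below is hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			enable_irq_wake(foo->irq);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			disable_irq_wake(foo->irq);
 *		return 0;
 *	}
 */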
 634
 635/*
 636 * Internal function that tells the architecture code whether a
 637 * particular irq has been exclusively allocated or is available
 638 * for driver use.
 639 */
 640int can_request_irq(unsigned int irq, unsigned long irqflags)
 641{
 642        unsigned long flags;
 643        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 644        int canrequest = 0;
 645
 646        if (!desc)
 647                return 0;
 648
 649        if (irq_settings_can_request(desc)) {
 650                if (!desc->action ||
 651                    irqflags & desc->action->flags & IRQF_SHARED)
 652                        canrequest = 1;
 653        }
 654        irq_put_desc_unlock(desc, flags);
 655        return canrequest;
 656}
 657
 658int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 659{
 660        struct irq_chip *chip = desc->irq_data.chip;
 661        int ret, unmask = 0;
 662
 663        if (!chip || !chip->irq_set_type) {
 664                /*
 665                 * IRQF_TRIGGER_* but the PIC does not support multiple
 666                 * flow-types?
 667                 */
 668                pr_debug("No set_type function for IRQ %d (%s)\n",
 669                         irq_desc_get_irq(desc),
 670                         chip ? (chip->name ? : "unknown") : "unknown");
 671                return 0;
 672        }
 673
 674        if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 675                if (!irqd_irq_masked(&desc->irq_data))
 676                        mask_irq(desc);
 677                if (!irqd_irq_disabled(&desc->irq_data))
 678                        unmask = 1;
 679        }
 680
 681        /* Mask all flags except trigger mode */
 682        flags &= IRQ_TYPE_SENSE_MASK;
 683        ret = chip->irq_set_type(&desc->irq_data, flags);
 684
 685        switch (ret) {
 686        case IRQ_SET_MASK_OK:
 687        case IRQ_SET_MASK_OK_DONE:
 688                irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 689                irqd_set(&desc->irq_data, flags);
 690
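                /* fall through */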
 691        case IRQ_SET_MASK_OK_NOCOPY:
 692                flags = irqd_get_trigger_type(&desc->irq_data);
 693                irq_settings_set_trigger_mask(desc, flags);
 694                irqd_clear(&desc->irq_data, IRQD_LEVEL);
 695                irq_settings_clr_level(desc);
 696                if (flags & IRQ_TYPE_LEVEL_MASK) {
 697                        irq_settings_set_level(desc);
 698                        irqd_set(&desc->irq_data, IRQD_LEVEL);
 699                }
 700
 701                ret = 0;
 702                break;
 703        default:
 704                pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 705                       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 706        }
 707        if (unmask)
 708                unmask_irq(desc);
 709        return ret;
 710}
 711
 712#ifdef CONFIG_HARDIRQS_SW_RESEND
 713int irq_set_parent(int irq, int parent_irq)
 714{
 715        unsigned long flags;
 716        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 717
 718        if (!desc)
 719                return -EINVAL;
 720
 721        desc->parent_irq = parent_irq;
 722
 723        irq_put_desc_unlock(desc, flags);
 724        return 0;
 725}
 726EXPORT_SYMBOL_GPL(irq_set_parent);
 727#endif
 728
 729/*
 730 * Default primary interrupt handler for threaded interrupts. Is
 731 * assigned as primary handler when request_threaded_irq is called
 732 * with handler == NULL. Useful for oneshot interrupts.
 733 */
 734static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 735{
 736        return IRQ_WAKE_THREAD;
 737}
 738
 739/*
 740 * Primary handler for nested threaded interrupts. Should never be
 741 * called.
 742 */
 743static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 744{
 745        WARN(1, "Primary handler called for nested irq %d\n", irq);
 746        return IRQ_NONE;
 747}
 748
 749static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 750{
 751        WARN(1, "Secondary action handler called for irq %d\n", irq);
 752        return IRQ_NONE;
 753}
 754
 755static int irq_wait_for_interrupt(struct irqaction *action)
 756{
 757        set_current_state(TASK_INTERRUPTIBLE);
 758
 759        while (!kthread_should_stop()) {
 760
 761                if (test_and_clear_bit(IRQTF_RUNTHREAD,
 762                                       &action->thread_flags)) {
 763                        __set_current_state(TASK_RUNNING);
 764                        return 0;
 765                }
 766                schedule();
 767                set_current_state(TASK_INTERRUPTIBLE);
 768        }
 769        __set_current_state(TASK_RUNNING);
 770        return -1;
 771}
 772
 773/*
 774 * Oneshot interrupts keep the irq line masked until the threaded
  775 * handler has finished. Unmask it if the interrupt has not been disabled
  776 * and is marked MASKED.
 777 */
 778static void irq_finalize_oneshot(struct irq_desc *desc,
 779                                 struct irqaction *action)
 780{
 781        if (!(desc->istate & IRQS_ONESHOT) ||
 782            action->handler == irq_forced_secondary_handler)
 783                return;
 784again:
 785        chip_bus_lock(desc);
 786        raw_spin_lock_irq(&desc->lock);
 787
 788        /*
  789         * Implausible though it may be, we need to protect ourselves
  790         * against the following scenario:
  791         *
  792         * The thread finishes faster than the hard interrupt handler
  793         * on the other CPU. If we unmask the irq line then the
  794         * interrupt can come in again, mask the line and leave it
  795         * masked forever due to IRQS_INPROGRESS.
  796         *
  797         * This also serializes the state of shared oneshot handlers
  798         * versus "desc->threads_oneshot |= action->thread_mask;" in
  799         * __irq_wake_thread(). See the comment there which explains the
 800         * serialization.
 801         */
 802        if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 803                raw_spin_unlock_irq(&desc->lock);
 804                chip_bus_sync_unlock(desc);
 805                cpu_relax();
 806                goto again;
 807        }
 808
 809        /*
 810         * Now check again, whether the thread should run. Otherwise
 811         * we would clear the threads_oneshot bit of this thread which
 812         * was just set.
 813         */
 814        if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 815                goto out_unlock;
 816
 817        desc->threads_oneshot &= ~action->thread_mask;
 818
 819        if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 820            irqd_irq_masked(&desc->irq_data))
 821                unmask_threaded_irq(desc);
 822
 823out_unlock:
 824        raw_spin_unlock_irq(&desc->lock);
 825        chip_bus_sync_unlock(desc);
 826}
 827
 828#ifdef CONFIG_SMP
 829/*
 830 * Check whether we need to change the affinity of the interrupt thread.
 831 */
 832static void
 833irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 834{
 835        cpumask_var_t mask;
 836        bool valid = true;
 837
 838        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 839                return;
 840
 841        /*
 842         * In case we are out of memory we set IRQTF_AFFINITY again and
 843         * try again next time
 844         */
 845        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 846                set_bit(IRQTF_AFFINITY, &action->thread_flags);
 847                return;
 848        }
 849
 850        raw_spin_lock_irq(&desc->lock);
 851        /*
 852         * This code is triggered unconditionally. Check the affinity
 853         * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 854         */
 855        if (cpumask_available(desc->irq_common_data.affinity))
 856                cpumask_copy(mask, desc->irq_common_data.affinity);
 857        else
 858                valid = false;
 859        raw_spin_unlock_irq(&desc->lock);
 860
 861        if (valid)
 862                set_cpus_allowed_ptr(current, mask);
 863        free_cpumask_var(mask);
 864}
 865#else
 866static inline void
 867irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 868#endif
 869
 870/*
  871 * Interrupts which are not explicitly requested as threaded
 872 * interrupts rely on the implicit bh/preempt disable of the hard irq
 873 * context. So we need to disable bh here to avoid deadlocks and other
 874 * side effects.
 875 */
 876static irqreturn_t
 877irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 878{
 879        irqreturn_t ret;
 880
 881        local_bh_disable();
 882        ret = action->thread_fn(action->irq, action->dev_id);
 883        irq_finalize_oneshot(desc, action);
 884        local_bh_enable();
 885        return ret;
 886}
 887
 888/*
 889 * Interrupts explicitly requested as threaded interrupts want to be
  890 * preemptible - many of them need to sleep and wait for slow buses to
 891 * complete.
 892 */
 893static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 894                struct irqaction *action)
 895{
 896        irqreturn_t ret;
 897
 898        ret = action->thread_fn(action->irq, action->dev_id);
 899        irq_finalize_oneshot(desc, action);
 900        return ret;
 901}
 902
 903static void wake_threads_waitq(struct irq_desc *desc)
 904{
 905        if (atomic_dec_and_test(&desc->threads_active))
 906                wake_up(&desc->wait_for_threads);
 907}
 908
 909static void irq_thread_dtor(struct callback_head *unused)
 910{
 911        struct task_struct *tsk = current;
 912        struct irq_desc *desc;
 913        struct irqaction *action;
 914
 915        if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
 916                return;
 917
 918        action = kthread_data(tsk);
 919
 920        pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 921               tsk->comm, tsk->pid, action->irq);
 922
 923
 924        desc = irq_to_desc(action->irq);
 925        /*
 926         * If IRQTF_RUNTHREAD is set, we need to decrement
 927         * desc->threads_active and wake possible waiters.
 928         */
 929        if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 930                wake_threads_waitq(desc);
 931
 932        /* Prevent a stale desc->threads_oneshot */
 933        irq_finalize_oneshot(desc, action);
 934}
 935
 936static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
 937{
 938        struct irqaction *secondary = action->secondary;
 939
 940        if (WARN_ON_ONCE(!secondary))
 941                return;
 942
 943        raw_spin_lock_irq(&desc->lock);
 944        __irq_wake_thread(desc, secondary);
 945        raw_spin_unlock_irq(&desc->lock);
 946}
 947
 948/*
 949 * Interrupt handler thread
 950 */
 951static int irq_thread(void *data)
 952{
 953        struct callback_head on_exit_work;
 954        struct irqaction *action = data;
 955        struct irq_desc *desc = irq_to_desc(action->irq);
 956        irqreturn_t (*handler_fn)(struct irq_desc *desc,
 957                        struct irqaction *action);
 958
 959        if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 960                                        &action->thread_flags))
 961                handler_fn = irq_forced_thread_fn;
 962        else
 963                handler_fn = irq_thread_fn;
 964
 965        init_task_work(&on_exit_work, irq_thread_dtor);
 966        task_work_add(current, &on_exit_work, false);
 967
 968        irq_thread_check_affinity(desc, action);
 969
 970        while (!irq_wait_for_interrupt(action)) {
 971                irqreturn_t action_ret;
 972
 973                irq_thread_check_affinity(desc, action);
 974
 975                action_ret = handler_fn(desc, action);
 976                if (action_ret == IRQ_HANDLED)
 977                        atomic_inc(&desc->threads_handled);
 978                if (action_ret == IRQ_WAKE_THREAD)
 979                        irq_wake_secondary(desc, action);
 980
 981                wake_threads_waitq(desc);
 982        }
 983
 984        /*
 985         * This is the regular exit path. __free_irq() is stopping the
 986         * thread via kthread_stop() after calling
 987         * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
 988         * oneshot mask bit can be set. We cannot verify that as we
 989         * cannot touch the oneshot mask at this point anymore as
  990         * __setup_irq() might have given out current's thread_mask
 991         * again.
 992         */
 993        task_work_cancel(current, irq_thread_dtor);
 994        return 0;
 995}
 996
 997/**
 998 *      irq_wake_thread - wake the irq thread for the action identified by dev_id
 999 *      @irq:           Interrupt line
1000 *      @dev_id:        Device identity for which the thread should be woken
1001 *
1002 */
1003void irq_wake_thread(unsigned int irq, void *dev_id)
1004{
1005        struct irq_desc *desc = irq_to_desc(irq);
1006        struct irqaction *action;
1007        unsigned long flags;
1008
1009        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1010                return;
1011
1012        raw_spin_lock_irqsave(&desc->lock, flags);
1013        for_each_action_of_desc(desc, action) {
1014                if (action->dev_id == dev_id) {
1015                        if (action->thread)
1016                                __irq_wake_thread(desc, action);
1017                        break;
1018                }
1019        }
1020        raw_spin_unlock_irqrestore(&desc->lock, flags);
1021}
1022EXPORT_SYMBOL_GPL(irq_wake_thread);
1023
1024static int irq_setup_forced_threading(struct irqaction *new)
1025{
1026        if (!force_irqthreads)
1027                return 0;
1028        if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1029                return 0;
1030
1031        new->flags |= IRQF_ONESHOT;
1032
1033        /*
1034         * Handle the case where we have a real primary handler and a
1035         * thread handler. We force thread them as well by creating a
1036         * secondary action.
1037         */
1038        if (new->handler != irq_default_primary_handler && new->thread_fn) {
1039                /* Allocate the secondary action */
1040                new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1041                if (!new->secondary)
1042                        return -ENOMEM;
1043                new->secondary->handler = irq_forced_secondary_handler;
1044                new->secondary->thread_fn = new->thread_fn;
1045                new->secondary->dev_id = new->dev_id;
1046                new->secondary->irq = new->irq;
1047                new->secondary->name = new->name;
1048        }
1049        /* Deal with the primary handler */
1050        set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1051        new->thread_fn = new->handler;
1052        new->handler = irq_default_primary_handler;
1053        return 0;
1054}
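/*
 * In other words (illustrative): with "threadirqs" on the command line, a
 * plain
 *
 *	request_irq(irq, foo_handler, 0, "foo", foo);
 *
 * is handled roughly as if the driver had asked for
 *
 *	request_threaded_irq(irq, NULL, foo_handler, IRQF_ONESHOT, "foo", foo);
 *
 * i.e. foo_handler becomes the thread function and the default primary
 * handler merely wakes the thread. "foo_handler" and "foo" are hypothetical.
 */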
1055
1056static int irq_request_resources(struct irq_desc *desc)
1057{
1058        struct irq_data *d = &desc->irq_data;
1059        struct irq_chip *c = d->chip;
1060
1061        return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1062}
1063
1064static void irq_release_resources(struct irq_desc *desc)
1065{
1066        struct irq_data *d = &desc->irq_data;
1067        struct irq_chip *c = d->chip;
1068
1069        if (c->irq_release_resources)
1070                c->irq_release_resources(d);
1071}
1072
1073static int
1074setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1075{
1076        struct task_struct *t;
1077        struct sched_param param = {
1078                .sched_priority = MAX_USER_RT_PRIO/2,
1079        };
1080
1081        if (!secondary) {
1082                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1083                                   new->name);
1084        } else {
1085                t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1086                                   new->name);
1087                param.sched_priority -= 1;
1088        }
1089
1090        if (IS_ERR(t))
1091                return PTR_ERR(t);
1092
1093        sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1094
1095        /*
1096         * We keep the reference to the task struct even if
 1097         * the thread dies, so that the interrupt code does not
 1098         * reference an already freed task_struct.
1099         */
1100        get_task_struct(t);
1101        new->thread = t;
1102        /*
1103         * Tell the thread to set its affinity. This is
1104         * important for shared interrupt handlers as we do
1105         * not invoke setup_affinity() for the secondary
1106         * handlers as everything is already set up. Even for
 1107         * interrupts marked with IRQF_NOBALANCING this is
1108         * correct as we want the thread to move to the cpu(s)
1109         * on which the requesting code placed the interrupt.
1110         */
1111        set_bit(IRQTF_AFFINITY, &new->thread_flags);
1112        return 0;
1113}
1114
1115/*
1116 * Internal function to register an irqaction - typically used to
1117 * allocate special interrupts that are part of the architecture.
1118 */
1119static int
1120__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1121{
1122        struct irqaction *old, **old_ptr;
1123        unsigned long flags, thread_mask = 0;
1124        int ret, nested, shared = 0;
1125        cpumask_var_t mask;
1126
1127        if (!desc)
1128                return -EINVAL;
1129
1130        if (desc->irq_data.chip == &no_irq_chip)
1131                return -ENOSYS;
1132        if (!try_module_get(desc->owner))
1133                return -ENODEV;
1134
1135        new->irq = irq;
1136
1137        /*
1138         * If the trigger type is not specified by the caller,
1139         * then use the default for this interrupt.
1140         */
1141        if (!(new->flags & IRQF_TRIGGER_MASK))
1142                new->flags |= irqd_get_trigger_type(&desc->irq_data);
1143
1144        /*
1145         * Check whether the interrupt nests into another interrupt
1146         * thread.
1147         */
1148        nested = irq_settings_is_nested_thread(desc);
1149        if (nested) {
1150                if (!new->thread_fn) {
1151                        ret = -EINVAL;
1152                        goto out_mput;
1153                }
1154                /*
1155                 * Replace the primary handler which was provided from
1156                 * the driver for non nested interrupt handling by the
1157                 * dummy function which warns when called.
1158                 */
1159                new->handler = irq_nested_primary_handler;
1160        } else {
1161                if (irq_settings_can_thread(desc)) {
1162                        ret = irq_setup_forced_threading(new);
1163                        if (ret)
1164                                goto out_mput;
1165                }
1166        }
1167
1168        /*
1169         * Create a handler thread when a thread function is supplied
1170         * and the interrupt does not nest into another interrupt
1171         * thread.
1172         */
1173        if (new->thread_fn && !nested) {
1174                ret = setup_irq_thread(new, irq, false);
1175                if (ret)
1176                        goto out_mput;
1177                if (new->secondary) {
1178                        ret = setup_irq_thread(new->secondary, irq, true);
1179                        if (ret)
1180                                goto out_thread;
1181                }
1182        }
1183
1184        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1185                ret = -ENOMEM;
1186                goto out_thread;
1187        }
1188
1189        /*
1190         * Drivers are often written to work w/o knowledge about the
1191         * underlying irq chip implementation, so a request for a
1192         * threaded irq without a primary hard irq context handler
1193         * requires the ONESHOT flag to be set. Some irq chips like
1194         * MSI based interrupts are per se one shot safe. Check the
1195         * chip flags, so we can avoid the unmask dance at the end of
1196         * the threaded handler for those.
1197         */
1198        if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1199                new->flags &= ~IRQF_ONESHOT;
1200
1201        /*
1202         * The following block of code has to be executed atomically
1203         */
1204        raw_spin_lock_irqsave(&desc->lock, flags);
1205        old_ptr = &desc->action;
1206        old = *old_ptr;
1207        if (old) {
1208                /*
1209                 * Can't share interrupts unless both agree to and are
1210                 * the same type (level, edge, polarity). So both flag
1211                 * fields must have IRQF_SHARED set and the bits which
1212                 * set the trigger type must match. Also all must
1213                 * agree on ONESHOT.
1214                 */
1215                unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
1216
1217                if (!((old->flags & new->flags) & IRQF_SHARED) ||
1218                    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1219                    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1220                        goto mismatch;
1221
1222                /* All handlers must agree on per-cpuness */
1223                if ((old->flags & IRQF_PERCPU) !=
1224                    (new->flags & IRQF_PERCPU))
1225                        goto mismatch;
1226
1227                /* add new interrupt at end of irq queue */
1228                do {
1229                        /*
1230                         * Or all existing action->thread_mask bits,
1231                         * so we can find the next zero bit for this
1232                         * new action.
1233                         */
1234                        thread_mask |= old->thread_mask;
1235                        old_ptr = &old->next;
1236                        old = *old_ptr;
1237                } while (old);
1238                shared = 1;
1239        }
1240
1241        /*
1242         * Setup the thread mask for this irqaction for ONESHOT. For
1243         * !ONESHOT irqs the thread mask is 0 so we can avoid a
1244         * conditional in irq_wake_thread().
1245         */
1246        if (new->flags & IRQF_ONESHOT) {
1247                /*
 1248                 * Unlikely to have 32 or 64 irqs sharing one line,
1249                 * but who knows.
1250                 */
1251                if (thread_mask == ~0UL) {
1252                        ret = -EBUSY;
1253                        goto out_mask;
1254                }
1255                /*
1256                 * The thread_mask for the action is or'ed to
 1257                 * desc->threads_oneshot to indicate that the
 1258                 * IRQF_ONESHOT thread handler has been woken, but not
 1259                 * yet finished. The bit is cleared when a thread
 1260                 * completes. When all threads of a shared interrupt
 1261                 * line have completed desc->threads_oneshot becomes
 1262                 * zero and the interrupt line is unmasked. See
 1263                 * handle.c:__irq_wake_thread() for further information.
 1264                 *
 1265                 * If no thread is woken by primary (hard irq context)
 1266                 * interrupt handlers, then desc->threads_oneshot is
1267                 * also checked for zero to unmask the irq line in the
1268                 * affected hard irq flow handlers
1269                 * (handle_[fasteoi|level]_irq).
1270                 *
1271                 * The new action gets the first zero bit of
1272                 * thread_mask assigned. See the loop above which or's
1273                 * all existing action->thread_mask bits.
1274                 */
 1275                new->thread_mask = 1UL << ffz(thread_mask);
1276
1277        } else if (new->handler == irq_default_primary_handler &&
1278                   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1279                /*
1280                 * The interrupt was requested with handler = NULL, so
1281                 * we use the default primary handler for it. But it
1282                 * does not have the oneshot flag set. In combination
1283                 * with level interrupts this is deadly, because the
1284                 * default primary handler just wakes the thread, then
 1285                 * the irq line is re-enabled, but the device still
1286                 * has the level irq asserted. Rinse and repeat....
1287                 *
1288                 * While this works for edge type interrupts, we play
1289                 * it safe and reject unconditionally because we can't
1290                 * say for sure which type this interrupt really
1291                 * has. The type flags are unreliable as the
1292                 * underlying chip implementation can override them.
1293                 */
1294                pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1295                       irq);
1296                ret = -EINVAL;
1297                goto out_mask;
1298        }
1299
1300        if (!shared) {
1301                ret = irq_request_resources(desc);
1302                if (ret) {
1303                        pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1304                               new->name, irq, desc->irq_data.chip->name);
1305                        goto out_mask;
1306                }
1307
1308                init_waitqueue_head(&desc->wait_for_threads);
1309
1310                /* Setup the type (level, edge polarity) if configured: */
1311                if (new->flags & IRQF_TRIGGER_MASK) {
1312                        ret = __irq_set_trigger(desc,
1313                                                new->flags & IRQF_TRIGGER_MASK);
1314
1315                        if (ret) {
1316                                irq_release_resources(desc);
1317                                goto out_mask;
1318                        }
1319                }
1320
1321                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1322                                  IRQS_ONESHOT | IRQS_WAITING);
1323                irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1324
1325                if (new->flags & IRQF_PERCPU) {
1326                        irqd_set(&desc->irq_data, IRQD_PER_CPU);
1327                        irq_settings_set_per_cpu(desc);
1328                }
1329
1330                if (new->flags & IRQF_ONESHOT)
1331                        desc->istate |= IRQS_ONESHOT;
1332
1333                if (irq_settings_can_autoenable(desc))
1334                        irq_startup(desc, true);
1335                else
1336                        /* Undo nested disables: */
1337                        desc->depth = 1;
1338
1339                /* Exclude IRQ from balancing if requested */
1340                if (new->flags & IRQF_NOBALANCING) {
1341                        irq_settings_set_no_balancing(desc);
1342                        irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1343                }
1344
 1345                /* Set default affinity mask once everything is set up */
1346                setup_affinity(desc, mask);
1347
1348        } else if (new->flags & IRQF_TRIGGER_MASK) {
1349                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1350                unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1351
1352                if (nmsk != omsk)
 1353                        /* hope the handler works with current trigger mode */
1354                        pr_warn("irq %d uses trigger mode %u; requested %u\n",
1355                                irq, omsk, nmsk);
1356        }
1357
1358        *old_ptr = new;
1359
1360        irq_pm_install_action(desc, new);
1361
1362        /* Reset broken irq detection when installing new handler */
1363        desc->irq_count = 0;
1364        desc->irqs_unhandled = 0;
1365
1366        /*
1367         * Check whether we disabled the irq via the spurious handler
1368         * before. Reenable it and give it another chance.
1369         */
1370        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1371                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1372                __enable_irq(desc);
1373        }
1374
1375        raw_spin_unlock_irqrestore(&desc->lock, flags);
1376
1377        /*
1378         * Strictly no need to wake it up, but hung_task complains
1379         * when no hard interrupt wakes the thread up.
1380         */
1381        if (new->thread)
1382                wake_up_process(new->thread);
1383        if (new->secondary)
1384                wake_up_process(new->secondary->thread);
1385
1386        register_irq_proc(irq, desc);
1387        new->dir = NULL;
1388        register_handler_proc(irq, new);
1389        free_cpumask_var(mask);
1390
1391        return 0;
1392
1393mismatch:
1394        if (!(new->flags & IRQF_PROBE_SHARED)) {
1395                pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1396                       irq, new->flags, new->name, old->flags, old->name);
1397#ifdef CONFIG_DEBUG_SHIRQ
1398                dump_stack();
1399#endif
1400        }
1401        ret = -EBUSY;
1402
1403out_mask:
1404        raw_spin_unlock_irqrestore(&desc->lock, flags);
1405        free_cpumask_var(mask);
1406
1407out_thread:
1408        if (new->thread) {
1409                struct task_struct *t = new->thread;
1410
1411                new->thread = NULL;
1412                kthread_stop(t);
1413                put_task_struct(t);
1414        }
1415        if (new->secondary && new->secondary->thread) {
1416                struct task_struct *t = new->secondary->thread;
1417
1418                new->secondary->thread = NULL;
1419                kthread_stop(t);
1420                put_task_struct(t);
1421        }
1422out_mput:
1423        module_put(desc->owner);
1424        return ret;
1425}
1426
1427/**
1428 *      setup_irq - setup an interrupt
1429 *      @irq: Interrupt line to setup
1430 *      @act: irqaction for the interrupt
1431 *
 1432 * Used to statically set up interrupts in the early boot process.
1433 */
1434int setup_irq(unsigned int irq, struct irqaction *act)
1435{
1436        int retval;
1437        struct irq_desc *desc = irq_to_desc(irq);
1438
1439        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1440                return -EINVAL;
1441
1442        retval = irq_chip_pm_get(&desc->irq_data);
1443        if (retval < 0)
1444                return retval;
1445
1446        chip_bus_lock(desc);
1447        retval = __setup_irq(irq, desc, act);
1448        chip_bus_sync_unlock(desc);
1449
1450        if (retval)
1451                irq_chip_pm_put(&desc->irq_data);
1452
1453        return retval;
1454}
1455EXPORT_SYMBOL_GPL(setup_irq);
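/*
 * Usage sketch (illustrative): architecture code typically passes a statically
 * allocated irqaction, e.g. for the timer tick. All names are hypothetical.
 *
 *	static struct irqaction foo_timer_irqaction = {
 *		.handler = foo_timer_interrupt,
 *		.flags	 = IRQF_TIMER,
 *		.name	 = "foo_timer",
 *	};
 *
 *	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 */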
1456
1457/*
1458 * Internal function to unregister an irqaction - used to free
1459 * regular and special interrupts that are part of the architecture.
1460 */
1461static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1462{
1463        struct irq_desc *desc = irq_to_desc(irq);
1464        struct irqaction *action, **action_ptr;
1465        unsigned long flags;
1466
1467        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1468
1469        if (!desc)
1470                return NULL;
1471
1472        chip_bus_lock(desc);
1473        raw_spin_lock_irqsave(&desc->lock, flags);
1474
1475        /*
1476         * There can be multiple actions per IRQ descriptor, find the right
1477         * one based on the dev_id:
1478         */
1479        action_ptr = &desc->action;
1480        for (;;) {
1481                action = *action_ptr;
1482
1483                if (!action) {
1484                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
1485                        raw_spin_unlock_irqrestore(&desc->lock, flags);
1486                        chip_bus_sync_unlock(desc);
1487                        return NULL;
1488                }
1489
1490                if (action->dev_id == dev_id)
1491                        break;
1492                action_ptr = &action->next;
1493        }
1494
1495        /* Found it - now remove it from the list of entries: */
1496        *action_ptr = action->next;
1497
1498        irq_pm_remove_action(desc, action);
1499
1500        /* If this was the last handler, shut down the IRQ line: */
1501        if (!desc->action) {
1502                irq_settings_clr_disable_unlazy(desc);
1503                irq_shutdown(desc);
1504                irq_release_resources(desc);
1505        }
1506
1507#ifdef CONFIG_SMP
1508        /* make sure affinity_hint is cleaned up */
1509        if (WARN_ON_ONCE(desc->affinity_hint))
1510                desc->affinity_hint = NULL;
1511#endif
1512
1513        raw_spin_unlock_irqrestore(&desc->lock, flags);
1514        chip_bus_sync_unlock(desc);
1515
1516        unregister_handler_proc(irq, action);
1517
1518        /* Make sure it's not being used on another CPU: */
1519        synchronize_irq(irq);
1520
1521#ifdef CONFIG_DEBUG_SHIRQ
1522        /*
1523         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1524         * event to happen even now it's being freed, so let's make sure that
1525         * is so by doing an extra call to the handler ....
1526         *
1527         * ( We do this after actually deregistering it, to make sure that a
 1528                 *   'real' IRQ doesn't run in parallel with our fake. )
1529         */
1530        if (action->flags & IRQF_SHARED) {
1531                local_irq_save(flags);
1532                action->handler(irq, dev_id);
1533                local_irq_restore(flags);
1534        }
1535#endif
1536
1537        if (action->thread) {
1538                kthread_stop(action->thread);
1539                put_task_struct(action->thread);
1540                if (action->secondary && action->secondary->thread) {
1541                        kthread_stop(action->secondary->thread);
1542                        put_task_struct(action->secondary->thread);
1543                }
1544        }
1545
1546        irq_chip_pm_put(&desc->irq_data);
1547        module_put(desc->owner);
1548        kfree(action->secondary);
1549        return action;
1550}
1551
1552/**
1553 *      remove_irq - free an interrupt
1554 *      @irq: Interrupt line to free
1555 *      @act: irqaction for the interrupt
1556 *
1557 * Used to remove interrupts statically set up by the early boot process.
1558 */
1559void remove_irq(unsigned int irq, struct irqaction *act)
1560{
1561        struct irq_desc *desc = irq_to_desc(irq);
1562
1563        if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1564                __free_irq(irq, act->dev_id);
1565}
1566EXPORT_SYMBOL_GPL(remove_irq);
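
/*
 * Illustrative sketch (not part of this file): setup_irq()/remove_irq() with
 * a statically allocated irqaction, as an architecture might use for an
 * interrupt registered too early for request_irq().  The handler, flags and
 * IRQ number below are hypothetical.
 */
#if 0
static irqreturn_t board_timer_interrupt(int irq, void *dev_id)
{
        /* acknowledge the timer and run the periodic tick */
        return IRQ_HANDLED;
}

static struct irqaction board_timer_irqaction = {
        .handler        = board_timer_interrupt,
        .flags          = IRQF_TIMER | IRQF_NOBALANCING,
        .name           = "timer",
};

static void __init board_time_init(void)
{
        if (setup_irq(0, &board_timer_irqaction))
                pr_err("failed to set up the timer interrupt\n");
}
#endif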
1567
1568/**
1569 *      free_irq - free an interrupt allocated with request_irq
1570 *      @irq: Interrupt line to free
1571 *      @dev_id: Device identity to free
1572 *
1573 *      Remove an interrupt handler. The handler is removed and if the
1574 *      interrupt line is no longer in use by any driver it is disabled.
1575 *      On a shared IRQ the caller must ensure the interrupt is disabled
1576 *      on the card it drives before calling this function. The function
1577 *      does not return until any executing interrupts for this IRQ
1578 *      have completed.
1579 *
1580 *      This function must not be called from interrupt context.
1581 *
1582 *      Returns the devname argument passed to request_irq.
1583 */
1584const void *free_irq(unsigned int irq, void *dev_id)
1585{
1586        struct irq_desc *desc = irq_to_desc(irq);
1587        struct irqaction *action;
1588        const char *devname;
1589
1590        if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1591                return NULL;
1592
1593#ifdef CONFIG_SMP
1594        if (WARN_ON(desc->affinity_notify))
1595                desc->affinity_notify = NULL;
1596#endif
1597
1598        action = __free_irq(irq, dev_id);
            if (!action)
                    return NULL;

1599        devname = action->name;
1600        kfree(action);
1601        return devname;
1602}
1603EXPORT_SYMBOL(free_irq);
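
/*
 * Illustrative sketch (not part of this file): tearing down an interrupt
 * obtained with request_irq().  On a shared line the device's own interrupt
 * source has to be silenced first; free_irq() then waits for running
 * handlers and returns the devname passed at request time.  The foo_*
 * helpers are hypothetical.
 */
#if 0
static void foo_teardown_irq(unsigned int irq, void *foo_dev)
{
        foo_disable_device_irq(foo_dev);        /* quiesce the card first */
        free_irq(irq, foo_dev);                 /* returns the devname string */
}
#endif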
1604
1605/**
1606 *      request_threaded_irq - allocate an interrupt line
1607 *      @irq: Interrupt line to allocate
1608 *      @handler: Function to be called when the IRQ occurs.
1609 *                Primary handler for threaded interrupts
1610 *                If NULL and thread_fn != NULL the default
1611 *                primary handler is installed
1612 *      @thread_fn: Function called from the irq handler thread
1613 *                  If NULL, no irq thread is created
1614 *      @irqflags: Interrupt type flags
1615 *      @devname: An ascii name for the claiming device
1616 *      @dev_id: A cookie passed back to the handler function
1617 *
1618 *      This call allocates interrupt resources and enables the
1619 *      interrupt line and IRQ handling. From the point this
1620 *      call is made your handler function may be invoked. Since
1621 *      your handler function must clear any interrupt the board
1622 *      raises, you must take care both to initialise your hardware
1623 *      and to set up the interrupt handler in the right order.
1624 *
1625 *      If you want to set up a threaded irq handler for your device
1626 *      then you need to supply @handler and @thread_fn. @handler is
1627 *      still called in hard interrupt context and has to check
1628 *      whether the interrupt originates from the device. If yes it
1629 *      needs to disable the interrupt on the device and return
1630 *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1631 *      @thread_fn. This split handler design is necessary to support
1632 *      shared interrupts.
1633 *
1634 *      Dev_id must be globally unique. Normally the address of the
1635 *      device data structure is used as the cookie. Since the handler
1636 *      receives this value it makes sense to use it.
1637 *
1638 *      If your interrupt is shared you must pass a non NULL dev_id
1639 *      as this is required when freeing the interrupt.
1640 *
1641 *      Flags:
1642 *
1643 *      IRQF_SHARED             Interrupt is shared
1644 *      IRQF_TRIGGER_*          Specify active edge(s) or level
1645 *
1646 */
1647int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1648                         irq_handler_t thread_fn, unsigned long irqflags,
1649                         const char *devname, void *dev_id)
1650{
1651        struct irqaction *action;
1652        struct irq_desc *desc;
1653        int retval;
1654
1655        if (irq == IRQ_NOTCONNECTED)
1656                return -ENOTCONN;
1657
1658        /*
1659         * Sanity-check: shared interrupts must pass in a real dev-ID,
1660         * otherwise we'll have trouble later trying to figure out
1661         * which interrupt is which (messes up the interrupt freeing
1662         * logic etc).
1663         *
1664         * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1665         * it cannot be set along with IRQF_NO_SUSPEND.
1666         */
1667        if (((irqflags & IRQF_SHARED) && !dev_id) ||
1668            (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1669            ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1670                return -EINVAL;
1671
1672        desc = irq_to_desc(irq);
1673        if (!desc)
1674                return -EINVAL;
1675
1676        if (!irq_settings_can_request(desc) ||
1677            WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1678                return -EINVAL;
1679
1680        if (!handler) {
1681                if (!thread_fn)
1682                        return -EINVAL;
1683                handler = irq_default_primary_handler;
1684        }
1685
1686        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1687        if (!action)
1688                return -ENOMEM;
1689
1690        action->handler = handler;
1691        action->thread_fn = thread_fn;
1692        action->flags = irqflags;
1693        action->name = devname;
1694        action->dev_id = dev_id;
1695
1696        retval = irq_chip_pm_get(&desc->irq_data);
1697        if (retval < 0) {
1698                kfree(action);
1699                return retval;
1700        }
1701
1702        chip_bus_lock(desc);
1703        retval = __setup_irq(irq, desc, action);
1704        chip_bus_sync_unlock(desc);
1705
1706        if (retval) {
1707                irq_chip_pm_put(&desc->irq_data);
1708                kfree(action->secondary);
1709                kfree(action);
1710        }
1711
1712#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1713        if (!retval && (irqflags & IRQF_SHARED)) {
1714                /*
1715                 * It's a shared IRQ -- the driver ought to be prepared for it
1716                 * to happen immediately, so let's make sure....
1717                 * We disable the irq to make sure that a 'real' IRQ doesn't
1718                 * run in parallel with our fake.
1719                 */
1720                unsigned long flags;
1721
1722                disable_irq(irq);
1723                local_irq_save(flags);
1724
1725                handler(irq, dev_id);
1726
1727                local_irq_restore(flags);
1728                enable_irq(irq);
1729        }
1730#endif
1731        return retval;
1732}
1733EXPORT_SYMBOL(request_threaded_irq);
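
/*
 * Illustrative sketch (not part of this file): the split primary/threaded
 * handler pattern described above.  The primary handler runs in hard
 * interrupt context, checks whether its device raised the interrupt,
 * silences it and returns IRQ_WAKE_THREAD; the bulk of the work then runs
 * in the interrupt thread.  All foo_* names are hypothetical.
 */
#if 0
static irqreturn_t foo_primary_handler(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        if (!foo_irq_is_ours(foo))
                return IRQ_NONE;                /* shared line, not our device */

        foo_mask_device_irq(foo);               /* keep it quiet until the thread ran */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        foo_process_events(foo);                /* may sleep here */
        foo_unmask_device_irq(foo);
        return IRQ_HANDLED;
}

static int foo_setup_threaded_irq(struct foo_device *foo)
{
        return request_threaded_irq(foo->irq, foo_primary_handler,
                                    foo_thread_fn, IRQF_SHARED, "foo", foo);
}
#endif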
1734
1735/**
1736 *      request_any_context_irq - allocate an interrupt line
1737 *      @irq: Interrupt line to allocate
1738 *      @handler: Function to be called when the IRQ occurs.
1739 *                Threaded handler for threaded interrupts.
1740 *      @flags: Interrupt type flags
1741 *      @name: An ascii name for the claiming device
1742 *      @dev_id: A cookie passed back to the handler function
1743 *
1744 *      This call allocates interrupt resources and enables the
1745 *      interrupt line and IRQ handling. It selects either a
1746 *      hardirq or threaded handling method depending on the
1747 *      context.
1748 *
1749 *      On failure, it returns a negative value. On success,
1750 *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1751 */
1752int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1753                            unsigned long flags, const char *name, void *dev_id)
1754{
1755        struct irq_desc *desc;
1756        int ret;
1757
1758        if (irq == IRQ_NOTCONNECTED)
1759                return -ENOTCONN;
1760
1761        desc = irq_to_desc(irq);
1762        if (!desc)
1763                return -EINVAL;
1764
1765        if (irq_settings_is_nested_thread(desc)) {
1766                ret = request_threaded_irq(irq, NULL, handler,
1767                                           flags, name, dev_id);
1768                return !ret ? IRQC_IS_NESTED : ret;
1769        }
1770
1771        ret = request_irq(irq, handler, flags, name, dev_id);
1772        return !ret ? IRQC_IS_HARDIRQ : ret;
1773}
1774EXPORT_SYMBOL_GPL(request_any_context_irq);
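
/*
 * Illustrative sketch (not part of this file): a driver that must work both
 * behind a chip providing a real hardirq and behind a nested threaded
 * controller (e.g. on a slow bus) can use request_any_context_irq() and
 * check which context was selected.  The foo_* names are hypothetical.
 */
#if 0
static int foo_setup_any_context_irq(unsigned int irq, void *foo_dev)
{
        int ret;

        ret = request_any_context_irq(irq, foo_event_handler, 0, "foo",
                                      foo_dev);
        if (ret < 0)
                return ret;

        /* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED; both mean success */
        pr_debug("foo: IRQ %u handled %s\n", irq,
                 ret == IRQC_IS_NESTED ? "in a nested thread" : "as a hardirq");
        return 0;
}
#endif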
1775
1776void enable_percpu_irq(unsigned int irq, unsigned int type)
1777{
1778        unsigned int cpu = smp_processor_id();
1779        unsigned long flags;
1780        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1781
1782        if (!desc)
1783                return;
1784
1785        /*
1786         * If the trigger type is not specified by the caller, then
1787         * use the default for this interrupt.
1788         */
1789        type &= IRQ_TYPE_SENSE_MASK;
1790        if (type == IRQ_TYPE_NONE)
1791                type = irqd_get_trigger_type(&desc->irq_data);
1792
1793        if (type != IRQ_TYPE_NONE) {
1794                int ret;
1795
1796                ret = __irq_set_trigger(desc, type);
1797
1798                if (ret) {
1799                        WARN(1, "failed to set type for IRQ%d\n", irq);
1800                        goto out;
1801                }
1802        }
1803
1804        irq_percpu_enable(desc, cpu);
1805out:
1806        irq_put_desc_unlock(desc, flags);
1807}
1808EXPORT_SYMBOL_GPL(enable_percpu_irq);
1809
1810/**
1811 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1812 * @irq:        Linux irq number to check for
1813 *
1814 * Must be called from a non-migratable context. Returns the enable
1815 * state of a per cpu interrupt on the current cpu.
1816 */
1817bool irq_percpu_is_enabled(unsigned int irq)
1818{
1819        unsigned int cpu = smp_processor_id();
1820        struct irq_desc *desc;
1821        unsigned long flags;
1822        bool is_enabled;
1823
1824        desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1825        if (!desc)
1826                return false;
1827
1828        is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1829        irq_put_desc_unlock(desc, flags);
1830
1831        return is_enabled;
1832}
1833EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1834
1835void disable_percpu_irq(unsigned int irq)
1836{
1837        unsigned int cpu = smp_processor_id();
1838        unsigned long flags;
1839        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1840
1841        if (!desc)
1842                return;
1843
1844        irq_percpu_disable(desc, cpu);
1845        irq_put_desc_unlock(desc, flags);
1846}
1847EXPORT_SYMBOL_GPL(disable_percpu_irq);
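
/*
 * Illustrative sketch (not part of this file): enable_percpu_irq() and
 * disable_percpu_irq() only act on the calling CPU, so drivers typically
 * invoke them from CPU hotplug callbacks (or via on_each_cpu()).  The
 * foo_* names are hypothetical.
 */
#if 0
static int foo_starting_cpu(unsigned int cpu)
{
        enable_percpu_irq(foo_ppi_irq, IRQ_TYPE_NONE);  /* keep default trigger */
        return 0;
}

static int foo_dying_cpu(unsigned int cpu)
{
        if (irq_percpu_is_enabled(foo_ppi_irq))
                disable_percpu_irq(foo_ppi_irq);
        return 0;
}
#endif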
1848
1849/*
1850 * Internal function to unregister a percpu irqaction.
1851 */
1852static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1853{
1854        struct irq_desc *desc = irq_to_desc(irq);
1855        struct irqaction *action;
1856        unsigned long flags;
1857
1858        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1859
1860        if (!desc)
1861                return NULL;
1862
1863        raw_spin_lock_irqsave(&desc->lock, flags);
1864
1865        action = desc->action;
1866        if (!action || action->percpu_dev_id != dev_id) {
1867                WARN(1, "Trying to free already-free IRQ %d\n", irq);
1868                goto bad;
1869        }
1870
1871        if (!cpumask_empty(desc->percpu_enabled)) {
1872                WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1873                     irq, cpumask_first(desc->percpu_enabled));
1874                goto bad;
1875        }
1876
1877        /* Found it - now remove it from the list of entries: */
1878        desc->action = NULL;
1879
1880        raw_spin_unlock_irqrestore(&desc->lock, flags);
1881
1882        unregister_handler_proc(irq, action);
1883
1884        irq_chip_pm_put(&desc->irq_data);
1885        module_put(desc->owner);
1886        return action;
1887
1888bad:
1889        raw_spin_unlock_irqrestore(&desc->lock, flags);
1890        return NULL;
1891}
1892
1893/**
1894 *      remove_percpu_irq - free a per-cpu interrupt
1895 *      @irq: Interrupt line to free
1896 *      @act: irqaction for the interrupt
1897 *
1898 * Used to remove interrupts statically set up by the early boot process.
1899 */
1900void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1901{
1902        struct irq_desc *desc = irq_to_desc(irq);
1903
1904        if (desc && irq_settings_is_per_cpu_devid(desc))
1905            __free_percpu_irq(irq, act->percpu_dev_id);
1906}
1907
1908/**
1909 *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1910 *      @irq: Interrupt line to free
1911 *      @dev_id: Device identity to free
1912 *
1913 *      Remove a percpu interrupt handler. The handler is removed, but
1914 *      the interrupt line is not disabled. This must be done on each
1915 *      CPU before calling this function. The function does not return
1916 *      until any executing interrupts for this IRQ have completed.
1917 *
1918 *      This function must not be called from interrupt context.
1919 */
1920void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1921{
1922        struct irq_desc *desc = irq_to_desc(irq);
1923
1924        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1925                return;
1926
1927        chip_bus_lock(desc);
1928        kfree(__free_percpu_irq(irq, dev_id));
1929        chip_bus_sync_unlock(desc);
1930}
1931EXPORT_SYMBOL_GPL(free_percpu_irq);
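
/*
 * Illustrative sketch (not part of this file): freeing a per-cpu interrupt.
 * The line has to be disabled on every CPU before free_percpu_irq() is
 * called with the same percpu cookie that was used at request time.  The
 * foo_* names are hypothetical.
 */
#if 0
static void foo_disable_ppi_local(void *info)
{
        disable_percpu_irq(foo_ppi_irq);
}

static void foo_release_ppi(void)
{
        on_each_cpu(foo_disable_ppi_local, NULL, 1);    /* disable on every CPU */
        free_percpu_irq(foo_ppi_irq, &foo_percpu_data);
}
#endif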
1932
1933/**
1934 *      setup_percpu_irq - setup a per-cpu interrupt
1935 *      @irq: Interrupt line to setup
1936 *      @act: irqaction for the interrupt
1937 *
1938 * Used to statically set up per-cpu interrupts in the early boot process.
1939 */
1940int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1941{
1942        struct irq_desc *desc = irq_to_desc(irq);
1943        int retval;
1944
1945        if (!desc || !irq_settings_is_per_cpu_devid(desc))
1946                return -EINVAL;
1947
1948        retval = irq_chip_pm_get(&desc->irq_data);
1949        if (retval < 0)
1950                return retval;
1951
1952        chip_bus_lock(desc);
1953        retval = __setup_irq(irq, desc, act);
1954        chip_bus_sync_unlock(desc);
1955
1956        if (retval)
1957                irq_chip_pm_put(&desc->irq_data);
1958
1959        return retval;
1960}
1961
1962/**
1963 *      request_percpu_irq - allocate a percpu interrupt line
1964 *      @irq: Interrupt line to allocate
1965 *      @handler: Function to be called when the IRQ occurs.
1966 *      @devname: An ascii name for the claiming device
1967 *      @dev_id: A percpu cookie passed back to the handler function
1968 *
1969 *      This call allocates interrupt resources and enables the
1970 *      interrupt on the local CPU. If the interrupt is supposed to be
1971 *      enabled on other CPUs, it has to be done on each CPU using
1972 *      enable_percpu_irq().
1973 *
1974 *      Dev_id must be globally unique. It is a per-cpu variable, and
1975 *      the handler gets called with the interrupted CPU's instance of
1976 *      that variable.
1977 */
1978int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1979                       const char *devname, void __percpu *dev_id)
1980{
1981        struct irqaction *action;
1982        struct irq_desc *desc;
1983        int retval;
1984
1985        if (!dev_id)
1986                return -EINVAL;
1987
1988        desc = irq_to_desc(irq);
1989        if (!desc || !irq_settings_can_request(desc) ||
1990            !irq_settings_is_per_cpu_devid(desc))
1991                return -EINVAL;
1992
1993        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1994        if (!action)
1995                return -ENOMEM;
1996
1997        action->handler = handler;
1998        action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1999        action->name = devname;
2000        action->percpu_dev_id = dev_id;
2001
2002        retval = irq_chip_pm_get(&desc->irq_data);
2003        if (retval < 0) {
2004                kfree(action);
2005                return retval;
2006        }
2007
2008        chip_bus_lock(desc);
2009        retval = __setup_irq(irq, desc, action);
2010        chip_bus_sync_unlock(desc);
2011
2012        if (retval) {
2013                irq_chip_pm_put(&desc->irq_data);
2014                kfree(action);
2015        }
2016
2017        return retval;
2018}
2019EXPORT_SYMBOL_GPL(request_percpu_irq);
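
/*
 * Illustrative sketch (not part of this file): requesting a per-cpu
 * interrupt with a percpu cookie and enabling it on the local CPU; every
 * other CPU has to call enable_percpu_irq() for itself (see the hotplug
 * sketch above).  foo_percpu_data and foo_percpu_handler are hypothetical.
 */
#if 0
static DEFINE_PER_CPU(struct foo_percpu_data, foo_percpu_data);

static int foo_request_ppi(unsigned int irq)
{
        int ret;

        ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
                                 &foo_percpu_data);
        if (ret)
                return ret;

        enable_percpu_irq(irq, IRQ_TYPE_NONE);          /* local CPU only */
        return 0;
}
#endif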
2020
2021/**
2022 *      irq_get_irqchip_state - returns the irqchip state of an interrupt.
2023 *      @irq: Interrupt line that is forwarded to a VM
2024 *      @which: One of IRQCHIP_STATE_* the caller wants to know about
2025 *      @state: a pointer to a boolean where the state is to be stored
2026 *
2027 *      This call snapshots the internal irqchip state of an
2028 *      interrupt, returning into @state the bit corresponding to
2029 *      state @which.
2030 *
2031 *      This function should be called with preemption disabled if the
2032 *      interrupt controller has per-cpu registers.
2033 */
2034int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2035                          bool *state)
2036{
2037        struct irq_desc *desc;
2038        struct irq_data *data;
2039        struct irq_chip *chip;
2040        unsigned long flags;
2041        int err = -EINVAL;
2042
2043        desc = irq_get_desc_buslock(irq, &flags, 0);
2044        if (!desc)
2045                return err;
2046
2047        data = irq_desc_get_irq_data(desc);
2048
2049        do {
2050                chip = irq_data_get_irq_chip(data);
2051                if (chip->irq_get_irqchip_state)
2052                        break;
2053#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2054                data = data->parent_data;
2055#else
2056                data = NULL;
2057#endif
2058        } while (data);
2059
2060        if (data)
2061                err = chip->irq_get_irqchip_state(data, which, state);
2062
2063        irq_put_desc_busunlock(desc, flags);
2064        return err;
2065}
2066EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
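
/*
 * Illustrative sketch (not part of this file): a VFIO/KVM style user
 * snapshotting whether a line forwarded to a guest is still pending at the
 * irqchip before saving its state.
 */
#if 0
static bool foo_irq_still_pending(unsigned int host_irq)
{
        bool pending = false;

        if (irq_get_irqchip_state(host_irq, IRQCHIP_STATE_PENDING, &pending))
                return false;                   /* chip cannot report it */

        return pending;
}
#endif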
2067
2068/**
2069 *      irq_set_irqchip_state - set the state of a forwarded interrupt.
2070 *      @irq: Interrupt line that is forwarded to a VM
2071 *      @which: State to be restored (one of IRQCHIP_STATE_*)
2072 *      @val: Value corresponding to @which
2073 *
2074 *      This call sets the internal irqchip state of an interrupt,
2075 *      depending on the value of @which.
2076 *
2077 *      This function should be called with preemption disabled if the
2078 *      interrupt controller has per-cpu registers.
2079 */
2080int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2081                          bool val)
2082{
2083        struct irq_desc *desc;
2084        struct irq_data *data;
2085        struct irq_chip *chip;
2086        unsigned long flags;
2087        int err = -EINVAL;
2088
2089        desc = irq_get_desc_buslock(irq, &flags, 0);
2090        if (!desc)
2091                return err;
2092
2093        data = irq_desc_get_irq_data(desc);
2094
2095        do {
2096                chip = irq_data_get_irq_chip(data);
2097                if (chip->irq_set_irqchip_state)
2098                        break;
2099#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2100                data = data->parent_data;
2101#else
2102                data = NULL;
2103#endif
2104        } while (data);
2105
2106        if (data)
2107                err = chip->irq_set_irqchip_state(data, which, val);
2108
2109        irq_put_desc_busunlock(desc, flags);
2110        return err;
2111}
2112EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
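
/*
 * Illustrative sketch (not part of this file): restoring previously saved
 * state when handing the interrupt back, mirroring the snapshot sketch
 * above.
 */
#if 0
static int foo_irq_restore_pending(unsigned int host_irq, bool was_pending)
{
        return irq_set_irqchip_state(host_irq, IRQCHIP_STATE_PENDING,
                                     was_pending);
}
#endif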
2113