linux/kernel/irq/chip.c
   1/*
   2 * linux/kernel/irq/chip.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
   6 *
   7 * This file contains the core interrupt handling code, for irq-chip
   8 * based architectures.
   9 *
  10 * Detailed information is available in Documentation/core-api/genericirq.rst
  11 */
  12
  13#include <linux/irq.h>
  14#include <linux/msi.h>
  15#include <linux/module.h>
  16#include <linux/interrupt.h>
  17#include <linux/kernel_stat.h>
  18#include <linux/irqdomain.h>
  19
  20#include <trace/events/irq.h>
  21
  22#include "internals.h"
  23
  24static irqreturn_t bad_chained_irq(int irq, void *dev_id)
  25{
  26        WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
  27        return IRQ_NONE;
  28}
  29
  30/*
   31 * Chained handlers should never call an action on their IRQ. This default
   32 * action will emit a warning if that happens.
  33 */
  34struct irqaction chained_action = {
  35        .handler = bad_chained_irq,
  36};
  37
  38/**
  39 *      irq_set_chip - set the irq chip for an irq
  40 *      @irq:   irq number
  41 *      @chip:  pointer to irq chip description structure
  42 */
  43int irq_set_chip(unsigned int irq, struct irq_chip *chip)
  44{
  45        unsigned long flags;
  46        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  47
  48        if (!desc)
  49                return -EINVAL;
  50
  51        if (!chip)
  52                chip = &no_irq_chip;
  53
  54        desc->irq_data.chip = chip;
  55        irq_put_desc_unlock(desc, flags);
  56        /*
  57         * For !CONFIG_SPARSE_IRQ make the irq show up in
  58         * allocated_irqs.
  59         */
  60        irq_mark_irq(irq);
  61        return 0;
  62}
  63EXPORT_SYMBOL(irq_set_chip);
  64
  65/**
   66 *      irq_set_irq_type - set the irq trigger type for an irq
  67 *      @irq:   irq number
  68 *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  69 */
  70int irq_set_irq_type(unsigned int irq, unsigned int type)
  71{
  72        unsigned long flags;
  73        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  74        int ret = 0;
  75
  76        if (!desc)
  77                return -EINVAL;
  78
  79        ret = __irq_set_trigger(desc, type);
  80        irq_put_desc_busunlock(desc, flags);
  81        return ret;
  82}
  83EXPORT_SYMBOL(irq_set_irq_type);
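
/*
 * Example (illustrative sketch, not part of this file): a driver for a
 * hypothetical "foo" device on a rising-edge triggered line can configure the
 * trigger explicitly before requesting the interrupt. Passing
 * IRQF_TRIGGER_RISING to request_irq() would achieve the same thing; all
 * "foo" names below are assumptions.
 */
static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
        /* device specific handling would go here */
        return IRQ_HANDLED;
}

static int foo_setup_irq(unsigned int irq, void *foo)
{
        int ret;

        ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
        if (ret)
                return ret;

        return request_irq(irq, foo_irq_handler, 0, "foo", foo);
}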
  84
  85/**
  86 *      irq_set_handler_data - set irq handler data for an irq
  87 *      @irq:   Interrupt number
  88 *      @data:  Pointer to interrupt specific data
  89 *
   90 *      Set the flow handler data for an irq
  91 */
  92int irq_set_handler_data(unsigned int irq, void *data)
  93{
  94        unsigned long flags;
  95        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  96
  97        if (!desc)
  98                return -EINVAL;
  99        desc->irq_common_data.handler_data = data;
 100        irq_put_desc_unlock(desc, flags);
 101        return 0;
 102}
 103EXPORT_SYMBOL(irq_set_handler_data);
 104
 105/**
 106 *      irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 107 *      @irq_base:      Interrupt number base
 108 *      @irq_offset:    Interrupt number offset
 109 *      @entry:         Pointer to MSI descriptor data
 110 *
 111 *      Set the MSI descriptor entry for an irq at offset
 112 */
 113int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
 114                         struct msi_desc *entry)
 115{
 116        unsigned long flags;
 117        struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 118
 119        if (!desc)
 120                return -EINVAL;
 121        desc->irq_common_data.msi_desc = entry;
 122        if (entry && !irq_offset)
 123                entry->irq = irq_base;
 124        irq_put_desc_unlock(desc, flags);
 125        return 0;
 126}
 127
 128/**
 129 *      irq_set_msi_desc - set MSI descriptor data for an irq
 130 *      @irq:   Interrupt number
 131 *      @entry: Pointer to MSI descriptor data
 132 *
 133 *      Set the MSI descriptor entry for an irq
 134 */
 135int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 136{
 137        return irq_set_msi_desc_off(irq, 0, entry);
 138}
 139
 140/**
 141 *      irq_set_chip_data - set irq chip data for an irq
 142 *      @irq:   Interrupt number
 143 *      @data:  Pointer to chip specific data
 144 *
 145 *      Set the hardware irq chip data for an irq
 146 */
 147int irq_set_chip_data(unsigned int irq, void *data)
 148{
 149        unsigned long flags;
 150        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 151
 152        if (!desc)
 153                return -EINVAL;
 154        desc->irq_data.chip_data = data;
 155        irq_put_desc_unlock(desc, flags);
 156        return 0;
 157}
 158EXPORT_SYMBOL(irq_set_chip_data);
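
/*
 * Example (illustrative sketch, not part of this file): a hypothetical
 * irqchip driver typically installs its irq_chip, flow handler and per-line
 * chip data from the irq_domain ->map() callback, and the chip callbacks
 * retrieve that data again with irq_data_get_irq_chip_data(). All "foo"
 * names and register offsets are assumptions.
 */
struct foo_intc {
        void __iomem *base;
};

#define FOO_MASK_SET    0x04
#define FOO_MASK_CLR    0x08

static void foo_irq_mask(struct irq_data *d)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(d);

        writel(BIT(d->hwirq), intc->base + FOO_MASK_SET);
}

static void foo_irq_unmask(struct irq_data *d)
{
        struct foo_intc *intc = irq_data_get_irq_chip_data(d);

        writel(BIT(d->hwirq), intc->base + FOO_MASK_CLR);
}

static struct irq_chip foo_irq_chip = {
        .name           = "foo",
        .irq_mask       = foo_irq_mask,
        .irq_unmask     = foo_irq_unmask,
};

static int foo_irq_map(struct irq_domain *d, unsigned int virq,
                       irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &foo_irq_chip, handle_level_irq);
        irq_set_chip_data(virq, d->host_data);  /* the foo_intc instance */
        return 0;
}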
 159
 160struct irq_data *irq_get_irq_data(unsigned int irq)
 161{
 162        struct irq_desc *desc = irq_to_desc(irq);
 163
 164        return desc ? &desc->irq_data : NULL;
 165}
 166EXPORT_SYMBOL_GPL(irq_get_irq_data);
 167
 168static void irq_state_clr_disabled(struct irq_desc *desc)
 169{
 170        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 171}
 172
 173static void irq_state_clr_masked(struct irq_desc *desc)
 174{
 175        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 176}
 177
 178static void irq_state_clr_started(struct irq_desc *desc)
 179{
 180        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
 181}
 182
 183static void irq_state_set_started(struct irq_desc *desc)
 184{
 185        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
 186}
 187
 188enum {
 189        IRQ_STARTUP_NORMAL,
 190        IRQ_STARTUP_MANAGED,
 191        IRQ_STARTUP_ABORT,
 192};
 193
 194#ifdef CONFIG_SMP
 195static int
 196__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 197{
 198        struct irq_data *d = irq_desc_get_irq_data(desc);
 199
 200        if (!irqd_affinity_is_managed(d))
 201                return IRQ_STARTUP_NORMAL;
 202
 203        irqd_clr_managed_shutdown(d);
 204
 205        if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
 206                /*
 207                 * Catch code which fiddles with enable_irq() on a managed
 208                 * and potentially shutdown IRQ. Chained interrupt
 209                 * installment or irq auto probing should not happen on
 210                 * managed irqs either. Emit a warning, break the affinity
 211                 * and start it up as a normal interrupt.
 212                 */
 213                if (WARN_ON_ONCE(force))
 214                        return IRQ_STARTUP_NORMAL;
 215                /*
 216                 * The interrupt was requested, but there is no online CPU
  217                 * in its affinity mask. Put it into managed shutdown
 218                 * state and let the cpu hotplug mechanism start it up once
 219                 * a CPU in the mask becomes available.
 220                 */
 221                irqd_set_managed_shutdown(d);
 222                return IRQ_STARTUP_ABORT;
 223        }
 224        return IRQ_STARTUP_MANAGED;
 225}
 226#else
 227static __always_inline int
 228__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 229{
 230        return IRQ_STARTUP_NORMAL;
 231}
 232#endif
 233
 234static int __irq_startup(struct irq_desc *desc)
 235{
 236        struct irq_data *d = irq_desc_get_irq_data(desc);
 237        int ret = 0;
 238
 239        irq_domain_activate_irq(d);
 240        if (d->chip->irq_startup) {
 241                ret = d->chip->irq_startup(d);
 242                irq_state_clr_disabled(desc);
 243                irq_state_clr_masked(desc);
 244        } else {
 245                irq_enable(desc);
 246        }
 247        irq_state_set_started(desc);
 248        return ret;
 249}
 250
 251int irq_startup(struct irq_desc *desc, bool resend, bool force)
 252{
 253        struct irq_data *d = irq_desc_get_irq_data(desc);
 254        struct cpumask *aff = irq_data_get_affinity_mask(d);
 255        int ret = 0;
 256
 257        desc->depth = 0;
 258
 259        if (irqd_is_started(d)) {
 260                irq_enable(desc);
 261        } else {
 262                switch (__irq_startup_managed(desc, aff, force)) {
 263                case IRQ_STARTUP_NORMAL:
 264                        ret = __irq_startup(desc);
 265                        irq_setup_affinity(desc);
 266                        break;
 267                case IRQ_STARTUP_MANAGED:
 268                        irq_do_set_affinity(d, aff, false);
 269                        ret = __irq_startup(desc);
 270                        break;
 271                case IRQ_STARTUP_ABORT:
 272                        return 0;
 273                }
 274        }
 275        if (resend)
 276                check_irq_resend(desc);
 277
 278        return ret;
 279}
 280
 281static void __irq_disable(struct irq_desc *desc, bool mask);
 282
 283void irq_shutdown(struct irq_desc *desc)
 284{
 285        if (irqd_is_started(&desc->irq_data)) {
 286                desc->depth = 1;
 287                if (desc->irq_data.chip->irq_shutdown) {
 288                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 289                        irq_state_set_disabled(desc);
 290                        irq_state_set_masked(desc);
 291                } else {
 292                        __irq_disable(desc, true);
 293                }
 294                irq_state_clr_started(desc);
 295        }
 296        /*
 297         * This must be called even if the interrupt was never started up,
 298         * because the activation can happen before the interrupt is
  299 * available for request/startup. It has its own state tracking so
 300         * it's safe to call it unconditionally.
 301         */
 302        irq_domain_deactivate_irq(&desc->irq_data);
 303}
 304
 305void irq_enable(struct irq_desc *desc)
 306{
 307        if (!irqd_irq_disabled(&desc->irq_data)) {
 308                unmask_irq(desc);
 309        } else {
 310                irq_state_clr_disabled(desc);
 311                if (desc->irq_data.chip->irq_enable) {
 312                        desc->irq_data.chip->irq_enable(&desc->irq_data);
 313                        irq_state_clr_masked(desc);
 314                } else {
 315                        unmask_irq(desc);
 316                }
 317        }
 318}
 319
 320static void __irq_disable(struct irq_desc *desc, bool mask)
 321{
 322        if (irqd_irq_disabled(&desc->irq_data)) {
 323                if (mask)
 324                        mask_irq(desc);
 325        } else {
 326                irq_state_set_disabled(desc);
 327                if (desc->irq_data.chip->irq_disable) {
 328                        desc->irq_data.chip->irq_disable(&desc->irq_data);
 329                        irq_state_set_masked(desc);
 330                } else if (mask) {
 331                        mask_irq(desc);
 332                }
 333        }
 334}
 335
 336/**
 337 * irq_disable - Mark interrupt disabled
 338 * @desc:       irq descriptor which should be disabled
 339 *
 340 * If the chip does not implement the irq_disable callback, we
 341 * use a lazy disable approach. That means we mark the interrupt
 342 * disabled, but leave the hardware unmasked. That's an
 343 * optimization because we avoid the hardware access for the
 344 * common case where no interrupt happens after we marked it
 345 * disabled. If an interrupt happens, then the interrupt flow
 346 * handler masks the line at the hardware level and marks it
 347 * pending.
 348 *
 349 * If the interrupt chip does not implement the irq_disable callback,
 350 * a driver can disable the lazy approach for a particular irq line by
 351 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 352 * be used for devices which cannot disable the interrupt at the
 353 * device level under certain circumstances and have to use
 354 * disable_irq[_nosync] instead.
 355 */
 356void irq_disable(struct irq_desc *desc)
 357{
 358        __irq_disable(desc, irq_settings_disable_unlazy(desc));
 359}
 360
 361void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
 362{
 363        if (desc->irq_data.chip->irq_enable)
 364                desc->irq_data.chip->irq_enable(&desc->irq_data);
 365        else
 366                desc->irq_data.chip->irq_unmask(&desc->irq_data);
 367        cpumask_set_cpu(cpu, desc->percpu_enabled);
 368}
 369
 370void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 371{
 372        if (desc->irq_data.chip->irq_disable)
 373                desc->irq_data.chip->irq_disable(&desc->irq_data);
 374        else
 375                desc->irq_data.chip->irq_mask(&desc->irq_data);
 376        cpumask_clear_cpu(cpu, desc->percpu_enabled);
 377}
 378
 379static inline void mask_ack_irq(struct irq_desc *desc)
 380{
 381        if (desc->irq_data.chip->irq_mask_ack) {
 382                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 383                irq_state_set_masked(desc);
 384        } else {
 385                mask_irq(desc);
 386                if (desc->irq_data.chip->irq_ack)
 387                        desc->irq_data.chip->irq_ack(&desc->irq_data);
 388        }
 389}
 390
 391void mask_irq(struct irq_desc *desc)
 392{
 393        if (irqd_irq_masked(&desc->irq_data))
 394                return;
 395
 396        if (desc->irq_data.chip->irq_mask) {
 397                desc->irq_data.chip->irq_mask(&desc->irq_data);
 398                irq_state_set_masked(desc);
 399        }
 400}
 401
 402void unmask_irq(struct irq_desc *desc)
 403{
 404        if (!irqd_irq_masked(&desc->irq_data))
 405                return;
 406
 407        if (desc->irq_data.chip->irq_unmask) {
 408                desc->irq_data.chip->irq_unmask(&desc->irq_data);
 409                irq_state_clr_masked(desc);
 410        }
 411}
 412
 413void unmask_threaded_irq(struct irq_desc *desc)
 414{
 415        struct irq_chip *chip = desc->irq_data.chip;
 416
 417        if (chip->flags & IRQCHIP_EOI_THREADED)
 418                chip->irq_eoi(&desc->irq_data);
 419
 420        unmask_irq(desc);
 421}
 422
 423/*
 424 *      handle_nested_irq - Handle a nested irq from a irq thread
 425 *      @irq:   the interrupt number
 426 *
 427 *      Handle interrupts which are nested into a threaded interrupt
 428 *      handler. The handler function is called inside the calling
  429 *      thread's context.
 430 */
 431void handle_nested_irq(unsigned int irq)
 432{
 433        struct irq_desc *desc = irq_to_desc(irq);
 434        struct irqaction *action;
 435        irqreturn_t action_ret;
 436
 437        might_sleep();
 438
 439        raw_spin_lock_irq(&desc->lock);
 440
 441        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 442
 443        action = desc->action;
 444        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
 445                desc->istate |= IRQS_PENDING;
 446                goto out_unlock;
 447        }
 448
 449        kstat_incr_irqs_this_cpu(desc);
 450        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 451        raw_spin_unlock_irq(&desc->lock);
 452
 453        action_ret = IRQ_NONE;
 454        for_each_action_of_desc(desc, action)
 455                action_ret |= action->thread_fn(action->irq, action->dev_id);
 456
 457        if (!noirqdebug)
 458                note_interrupt(desc, action_ret);
 459
 460        raw_spin_lock_irq(&desc->lock);
 461        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 462
 463out_unlock:
 464        raw_spin_unlock_irq(&desc->lock);
 465}
 466EXPORT_SYMBOL_GPL(handle_nested_irq);
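
/*
 * Example (illustrative sketch, not part of this file): a hypothetical I2C
 * GPIO expander demultiplexes its child interrupts from the threaded handler
 * of its parent interrupt, because reading the status register requires a
 * sleeping bus access. The children run in the same thread context via
 * handle_nested_irq(). All "foo" names are assumptions.
 */
#define FOO_NR_LINES    16

struct foo_expander {
        struct irq_domain *domain;
        /* i2c client, locking, ... */
};

static unsigned long foo_read_status(struct foo_expander *foo)
{
        /* a sleeping I2C register read would go here */
        return 0;
}

static irqreturn_t foo_expander_irq_thread(int irq, void *dev_id)
{
        struct foo_expander *foo = dev_id;
        unsigned long status = foo_read_status(foo);
        int bit;

        for_each_set_bit(bit, &status, FOO_NR_LINES)
                handle_nested_irq(irq_find_mapping(foo->domain, bit));

        return status ? IRQ_HANDLED : IRQ_NONE;
}

/* The parent line would be requested with request_threaded_irq(parent_irq,
 * NULL, foo_expander_irq_thread, IRQF_ONESHOT, "foo-expander", foo), and the
 * child irqs marked with irq_set_nested_thread().
 */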
 467
 468static bool irq_check_poll(struct irq_desc *desc)
 469{
 470        if (!(desc->istate & IRQS_POLL_INPROGRESS))
 471                return false;
 472        return irq_wait_for_poll(desc);
 473}
 474
 475static bool irq_may_run(struct irq_desc *desc)
 476{
 477        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 478
 479        /*
 480         * If the interrupt is not in progress and is not an armed
 481         * wakeup interrupt, proceed.
 482         */
 483        if (!irqd_has_set(&desc->irq_data, mask))
 484                return true;
 485
 486        /*
 487         * If the interrupt is an armed wakeup source, mark it pending
 488         * and suspended, disable it and notify the pm core about the
 489         * event.
 490         */
 491        if (irq_pm_check_wakeup(desc))
 492                return false;
 493
 494        /*
 495         * Handle a potential concurrent poll on a different core.
 496         */
 497        return irq_check_poll(desc);
 498}
 499
 500/**
 501 *      handle_simple_irq - Simple and software-decoded IRQs.
 502 *      @desc:  the interrupt description structure for this irq
 503 *
 504 *      Simple interrupts are either sent from a demultiplexing interrupt
 505 *      handler or come from hardware, where no interrupt hardware control
 506 *      is necessary.
 507 *
 508 *      Note: The caller is expected to handle the ack, clear, mask and
 509 *      unmask issues if necessary.
 510 */
 511void handle_simple_irq(struct irq_desc *desc)
 512{
 513        raw_spin_lock(&desc->lock);
 514
 515        if (!irq_may_run(desc))
 516                goto out_unlock;
 517
 518        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 519
 520        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 521                desc->istate |= IRQS_PENDING;
 522                goto out_unlock;
 523        }
 524
 525        kstat_incr_irqs_this_cpu(desc);
 526        handle_irq_event(desc);
 527
 528out_unlock:
 529        raw_spin_unlock(&desc->lock);
 530}
 531EXPORT_SYMBOL_GPL(handle_simple_irq);
 532
 533/**
 534 *      handle_untracked_irq - Simple and software-decoded IRQs.
 535 *      @desc:  the interrupt description structure for this irq
 536 *
 537 *      Untracked interrupts are sent from a demultiplexing interrupt
  538 *      handler when the demultiplexer does not know which device in its
  539 *      multiplexed irq domain generated the interrupt. IRQs handled
 540 *      through here are not subjected to stats tracking, randomness, or
 541 *      spurious interrupt detection.
 542 *
 543 *      Note: Like handle_simple_irq, the caller is expected to handle
 544 *      the ack, clear, mask and unmask issues if necessary.
 545 */
 546void handle_untracked_irq(struct irq_desc *desc)
 547{
 548        unsigned int flags = 0;
 549
 550        raw_spin_lock(&desc->lock);
 551
 552        if (!irq_may_run(desc))
 553                goto out_unlock;
 554
 555        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 556
 557        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 558                desc->istate |= IRQS_PENDING;
 559                goto out_unlock;
 560        }
 561
 562        desc->istate &= ~IRQS_PENDING;
 563        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 564        raw_spin_unlock(&desc->lock);
 565
 566        __handle_irq_event_percpu(desc, &flags);
 567
 568        raw_spin_lock(&desc->lock);
 569        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 570
 571out_unlock:
 572        raw_spin_unlock(&desc->lock);
 573}
 574EXPORT_SYMBOL_GPL(handle_untracked_irq);
 575
 576/*
 577 * Called unconditionally from handle_level_irq() and only for oneshot
 578 * interrupts from handle_fasteoi_irq()
 579 */
 580static void cond_unmask_irq(struct irq_desc *desc)
 581{
 582        /*
 583         * We need to unmask in the following cases:
 584         * - Standard level irq (IRQF_ONESHOT is not set)
 585         * - Oneshot irq which did not wake the thread (caused by a
 586         *   spurious interrupt or a primary handler handling it
 587         *   completely).
 588         */
 589        if (!irqd_irq_disabled(&desc->irq_data) &&
 590            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
 591                unmask_irq(desc);
 592}
 593
 594/**
 595 *      handle_level_irq - Level type irq handler
 596 *      @desc:  the interrupt description structure for this irq
 597 *
 598 *      Level type interrupts are active as long as the hardware line has
  599 *      the active level. This may require masking the interrupt and unmasking
  600 *      it after the associated handler has acknowledged the device, so that the
 601 *      interrupt line is back to inactive.
 602 */
 603void handle_level_irq(struct irq_desc *desc)
 604{
 605        raw_spin_lock(&desc->lock);
 606        mask_ack_irq(desc);
 607
 608        if (!irq_may_run(desc))
 609                goto out_unlock;
 610
 611        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 612
 613        /*
  614         * If it's disabled or no action is available,
 615         * keep it masked and get out of here
 616         */
 617        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 618                desc->istate |= IRQS_PENDING;
 619                goto out_unlock;
 620        }
 621
 622        kstat_incr_irqs_this_cpu(desc);
 623        handle_irq_event(desc);
 624
 625        cond_unmask_irq(desc);
 626
 627out_unlock:
 628        raw_spin_unlock(&desc->lock);
 629}
 630EXPORT_SYMBOL_GPL(handle_level_irq);
 631
 632#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
 633static inline void preflow_handler(struct irq_desc *desc)
 634{
 635        if (desc->preflow_handler)
 636                desc->preflow_handler(&desc->irq_data);
 637}
 638#else
 639static inline void preflow_handler(struct irq_desc *desc) { }
 640#endif
 641
 642static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
 643{
 644        if (!(desc->istate & IRQS_ONESHOT)) {
 645                chip->irq_eoi(&desc->irq_data);
 646                return;
 647        }
 648        /*
 649         * We need to unmask in the following cases:
 650         * - Oneshot irq which did not wake the thread (caused by a
 651         *   spurious interrupt or a primary handler handling it
 652         *   completely).
 653         */
 654        if (!irqd_irq_disabled(&desc->irq_data) &&
 655            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
 656                chip->irq_eoi(&desc->irq_data);
 657                unmask_irq(desc);
 658        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
 659                chip->irq_eoi(&desc->irq_data);
 660        }
 661}
 662
 663/**
 664 *      handle_fasteoi_irq - irq handler for transparent controllers
 665 *      @desc:  the interrupt description structure for this irq
 666 *
 667 *      Only a single callback will be issued to the chip: an ->eoi()
 668 *      call when the interrupt has been serviced. This enables support
 669 *      for modern forms of interrupt handlers, which handle the flow
 670 *      details in hardware, transparently.
 671 */
 672void handle_fasteoi_irq(struct irq_desc *desc)
 673{
 674        struct irq_chip *chip = desc->irq_data.chip;
 675
 676        raw_spin_lock(&desc->lock);
 677
 678        if (!irq_may_run(desc))
 679                goto out;
 680
 681        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 682
 683        /*
  684         * If it's disabled or no action is available,
 685         * then mask it and get out of here:
 686         */
 687        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 688                desc->istate |= IRQS_PENDING;
 689                mask_irq(desc);
 690                goto out;
 691        }
 692
 693        kstat_incr_irqs_this_cpu(desc);
 694        if (desc->istate & IRQS_ONESHOT)
 695                mask_irq(desc);
 696
 697        preflow_handler(desc);
 698        handle_irq_event(desc);
 699
 700        cond_unmask_eoi_irq(desc, chip);
 701
 702        raw_spin_unlock(&desc->lock);
 703        return;
 704out:
 705        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 706                chip->irq_eoi(&desc->irq_data);
 707        raw_spin_unlock(&desc->lock);
 708}
 709EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 710
 711/**
 712 *      handle_edge_irq - edge type IRQ handler
 713 *      @desc:  the interrupt description structure for this irq
 714 *
  715 *      The interrupt occurs on the falling and/or rising edge of a hardware
 716 *      signal. The occurrence is latched into the irq controller hardware
  717 *      and must be acked in order to be re-enabled. After the ack another
 718 *      interrupt can happen on the same source even before the first one
 719 *      is handled by the associated event handler. If this happens it
 720 *      might be necessary to disable (mask) the interrupt depending on the
 721 *      controller hardware. This requires to reenable the interrupt inside
  722 *      controller hardware. This requires re-enabling the interrupt inside
  723 *      the loop which handles the interrupts that arrived while
 724 *      loop is left.
 725 */
 726void handle_edge_irq(struct irq_desc *desc)
 727{
 728        raw_spin_lock(&desc->lock);
 729
 730        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 731
 732        if (!irq_may_run(desc)) {
 733                desc->istate |= IRQS_PENDING;
 734                mask_ack_irq(desc);
 735                goto out_unlock;
 736        }
 737
 738        /*
  739         * If it's disabled or no action is available, then mask it and get
 740         * out of here.
 741         */
 742        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 743                desc->istate |= IRQS_PENDING;
 744                mask_ack_irq(desc);
 745                goto out_unlock;
 746        }
 747
 748        kstat_incr_irqs_this_cpu(desc);
 749
 750        /* Start handling the irq */
 751        desc->irq_data.chip->irq_ack(&desc->irq_data);
 752
 753        do {
 754                if (unlikely(!desc->action)) {
 755                        mask_irq(desc);
 756                        goto out_unlock;
 757                }
 758
 759                /*
 760                 * When another irq arrived while we were handling
 761                 * one, we could have masked the irq.
  762                 * Re-enable it, if it was not disabled in the meantime.
 763                 */
 764                if (unlikely(desc->istate & IRQS_PENDING)) {
 765                        if (!irqd_irq_disabled(&desc->irq_data) &&
 766                            irqd_irq_masked(&desc->irq_data))
 767                                unmask_irq(desc);
 768                }
 769
 770                handle_irq_event(desc);
 771
 772        } while ((desc->istate & IRQS_PENDING) &&
 773                 !irqd_irq_disabled(&desc->irq_data));
 774
 775out_unlock:
 776        raw_spin_unlock(&desc->lock);
 777}
 778EXPORT_SYMBOL(handle_edge_irq);
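
/*
 * Example (illustrative sketch, not part of this file): an irq chip used with
 * handle_edge_irq() must provide ->irq_ack(), since the flow handler above
 * invokes it unconditionally to clear the latched edge; ->irq_mask() and
 * ->irq_unmask() are optional because mask_irq()/unmask_irq() check for them.
 * The "foo" names are assumptions.
 */
static void foo_edge_ack(struct irq_data *d)
{
        /* clear the latched edge, e.g. via a write-1-to-clear register */
}

static struct irq_chip foo_edge_chip = {
        .name           = "foo-edge",
        .irq_ack        = foo_edge_ack,
};

/* Installed from the domain's ->map() callback with:
 *      irq_set_chip_and_handler(virq, &foo_edge_chip, handle_edge_irq);
 */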
 779
 780#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 781/**
 782 *      handle_edge_eoi_irq - edge eoi type IRQ handler
 783 *      @desc:  the interrupt description structure for this irq
 784 *
  785 * Similar to handle_edge_irq above, but using eoi and without the
 786 * mask/unmask logic.
 787 */
 788void handle_edge_eoi_irq(struct irq_desc *desc)
 789{
 790        struct irq_chip *chip = irq_desc_get_chip(desc);
 791
 792        raw_spin_lock(&desc->lock);
 793
 794        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 795
 796        if (!irq_may_run(desc)) {
 797                desc->istate |= IRQS_PENDING;
 798                goto out_eoi;
 799        }
 800
 801        /*
  802         * If it's disabled or no action is available, then get
  803         * out of here.
 804         */
 805        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 806                desc->istate |= IRQS_PENDING;
 807                goto out_eoi;
 808        }
 809
 810        kstat_incr_irqs_this_cpu(desc);
 811
 812        do {
 813                if (unlikely(!desc->action))
 814                        goto out_eoi;
 815
 816                handle_irq_event(desc);
 817
 818        } while ((desc->istate & IRQS_PENDING) &&
 819                 !irqd_irq_disabled(&desc->irq_data));
 820
 821out_eoi:
 822        chip->irq_eoi(&desc->irq_data);
 823        raw_spin_unlock(&desc->lock);
 824}
 825#endif
 826
 827/**
 828 *      handle_percpu_irq - Per CPU local irq handler
 829 *      @desc:  the interrupt description structure for this irq
 830 *
 831 *      Per CPU interrupts on SMP machines without locking requirements
 832 */
 833void handle_percpu_irq(struct irq_desc *desc)
 834{
 835        struct irq_chip *chip = irq_desc_get_chip(desc);
 836
 837        kstat_incr_irqs_this_cpu(desc);
 838
 839        if (chip->irq_ack)
 840                chip->irq_ack(&desc->irq_data);
 841
 842        handle_irq_event_percpu(desc);
 843
 844        if (chip->irq_eoi)
 845                chip->irq_eoi(&desc->irq_data);
 846}
 847
 848/**
 849 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 850 * @desc:       the interrupt description structure for this irq
 851 *
 852 * Per CPU interrupts on SMP machines without locking requirements. Same as
 853 * handle_percpu_irq() above but with the following extras:
 854 *
 855 * action->percpu_dev_id is a pointer to percpu variables which
 856 * contain the real device id for the cpu on which this handler is
 857 * called
 858 */
 859void handle_percpu_devid_irq(struct irq_desc *desc)
 860{
 861        struct irq_chip *chip = irq_desc_get_chip(desc);
 862        struct irqaction *action = desc->action;
 863        unsigned int irq = irq_desc_get_irq(desc);
 864        irqreturn_t res;
 865
 866        kstat_incr_irqs_this_cpu(desc);
 867
 868        if (chip->irq_ack)
 869                chip->irq_ack(&desc->irq_data);
 870
 871        if (likely(action)) {
 872                trace_irq_handler_entry(irq, action);
 873                res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 874                trace_irq_handler_exit(irq, action, res);
 875        } else {
 876                unsigned int cpu = smp_processor_id();
 877                bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
 878
 879                if (enabled)
 880                        irq_percpu_disable(desc, cpu);
 881
 882                pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
 883                            enabled ? " and unmasked" : "", irq, cpu);
 884        }
 885
 886        if (chip->irq_eoi)
 887                chip->irq_eoi(&desc->irq_data);
 888}
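
/*
 * Example (illustrative sketch, not part of this file): a per-CPU interrupt
 * such as a local timer is requested once with a percpu dev_id and then
 * enabled on each CPU, typically from a CPU hotplug callback. The irqchip
 * side is expected to have marked the line with irq_set_percpu_devid() and
 * installed handle_percpu_devid_irq(). All "foo" names are assumptions.
 */
struct foo_evt {
        unsigned long count;
};

static DEFINE_PER_CPU(struct foo_evt, foo_evts);

static irqreturn_t foo_timer_irq(int irq, void *dev_id)
{
        struct foo_evt *evt = dev_id;   /* this CPU's instance */

        evt->count++;
        return IRQ_HANDLED;
}

static int foo_timer_init(unsigned int irq)
{
        int ret;

        ret = request_percpu_irq(irq, foo_timer_irq, "foo-timer", &foo_evts);
        if (ret)
                return ret;

        /* On each CPU, e.g. from the hotplug online callback: */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        return 0;
}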
 889
 890static void
 891__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 892                     int is_chained, const char *name)
 893{
 894        if (!handle) {
 895                handle = handle_bad_irq;
 896        } else {
 897                struct irq_data *irq_data = &desc->irq_data;
 898#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 899                /*
 900                 * With hierarchical domains we might run into a
 901                 * situation where the outermost chip is not yet set
 902                 * up, but the inner chips are there.  Instead of
 903                 * bailing we install the handler, but obviously we
 904                 * cannot enable/startup the interrupt at this point.
 905                 */
 906                while (irq_data) {
 907                        if (irq_data->chip != &no_irq_chip)
 908                                break;
 909                        /*
 910                         * Bail out if the outer chip is not set up
  911                         * and the interrupt is supposed to be started
 912                         * right away.
 913                         */
 914                        if (WARN_ON(is_chained))
 915                                return;
 916                        /* Try the parent */
 917                        irq_data = irq_data->parent_data;
 918                }
 919#endif
 920                if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
 921                        return;
 922        }
 923
 924        /* Uninstall? */
 925        if (handle == handle_bad_irq) {
 926                if (desc->irq_data.chip != &no_irq_chip)
 927                        mask_ack_irq(desc);
 928                irq_state_set_disabled(desc);
 929                if (is_chained)
 930                        desc->action = NULL;
 931                desc->depth = 1;
 932        }
 933        desc->handle_irq = handle;
 934        desc->name = name;
 935
 936        if (handle != handle_bad_irq && is_chained) {
 937                unsigned int type = irqd_get_trigger_type(&desc->irq_data);
 938
 939                /*
 940                 * We're about to start this interrupt immediately,
 941                 * hence the need to set the trigger configuration.
 942                 * But the .set_type callback may have overridden the
 943                 * flow handler, ignoring that we're dealing with a
 944                 * chained interrupt. Reset it immediately because we
 945                 * do know better.
 946                 */
 947                if (type != IRQ_TYPE_NONE) {
 948                        __irq_set_trigger(desc, type);
 949                        desc->handle_irq = handle;
 950                }
 951
 952                irq_settings_set_noprobe(desc);
 953                irq_settings_set_norequest(desc);
 954                irq_settings_set_nothread(desc);
 955                desc->action = &chained_action;
 956                irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 957        }
 958}
 959
 960void
 961__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 962                  const char *name)
 963{
 964        unsigned long flags;
 965        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 966
 967        if (!desc)
 968                return;
 969
 970        __irq_do_set_handler(desc, handle, is_chained, name);
 971        irq_put_desc_busunlock(desc, flags);
 972}
 973EXPORT_SYMBOL_GPL(__irq_set_handler);
 974
 975void
 976irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
 977                                 void *data)
 978{
 979        unsigned long flags;
 980        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 981
 982        if (!desc)
 983                return;
 984
 985        desc->irq_common_data.handler_data = data;
 986        __irq_do_set_handler(desc, handle, 1, NULL);
 987
 988        irq_put_desc_busunlock(desc, flags);
 989}
 990EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
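
/*
 * Example (illustrative sketch, not part of this file): a hypothetical GPIO
 * controller hooks its demultiplex routine onto the parent interrupt as a
 * chained handler. The routine runs in the parent's flow handler context, so
 * it must not sleep; chained_irq_enter()/chained_irq_exit() (from
 * linux/irqchip/chained_irq.h) handle the parent chip's ack/eoi. All "foo"
 * names and register offsets are assumptions.
 */
#define FOO_PENDING     0x10
#define FOO_NR_GPIOS    32

struct foo_gpio {
        void __iomem *base;
        struct irq_domain *domain;
};

static void foo_gpio_irq_handler(struct irq_desc *desc)
{
        struct foo_gpio *foo = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending;
        int bit;

        chained_irq_enter(chip, desc);

        pending = readl(foo->base + FOO_PENDING);
        for_each_set_bit(bit, &pending, FOO_NR_GPIOS)
                generic_handle_irq(irq_find_mapping(foo->domain, bit));

        chained_irq_exit(chip, desc);
}

/* At probe time:
 *      irq_set_chained_handler_and_data(parent_irq, foo_gpio_irq_handler, foo);
 */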
 991
 992void
 993irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 994                              irq_flow_handler_t handle, const char *name)
 995{
 996        irq_set_chip(irq, chip);
 997        __irq_set_handler(irq, handle, 0, name);
 998}
 999EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
1000
1001void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1002{
1003        unsigned long flags, trigger, tmp;
1004        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1005
1006        if (!desc)
1007                return;
1008
1009        /*
1010         * Warn when a driver sets the no autoenable flag on an already
1011         * active interrupt.
1012         */
1013        WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1014
1015        irq_settings_clr_and_set(desc, clr, set);
1016
1017        trigger = irqd_get_trigger_type(&desc->irq_data);
1018
1019        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1020                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1021        if (irq_settings_has_no_balance_set(desc))
1022                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1023        if (irq_settings_is_per_cpu(desc))
1024                irqd_set(&desc->irq_data, IRQD_PER_CPU);
1025        if (irq_settings_can_move_pcntxt(desc))
1026                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1027        if (irq_settings_is_level(desc))
1028                irqd_set(&desc->irq_data, IRQD_LEVEL);
1029
1030        tmp = irq_settings_get_trigger_mask(desc);
1031        if (tmp != IRQ_TYPE_NONE)
1032                trigger = tmp;
1033
1034        irqd_set(&desc->irq_data, trigger);
1035
1036        irq_put_desc_unlock(desc, flags);
1037}
1038EXPORT_SYMBOL_GPL(irq_modify_status);
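
/*
 * Example (illustrative sketch, not part of this file): an irqchip driver can
 * use irq_modify_status() to adjust the per-irq settings bits, for instance
 * to keep a line it uses internally (such as a cascade input) from being
 * requested, auto-probed or auto-enabled by device drivers. The "foo" name
 * is an assumption.
 */
static void foo_reserve_cascade_line(unsigned int irq)
{
        irq_modify_status(irq, 0, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN);
}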
1039
1040/**
1041 *      irq_cpu_online - Invoke all irq_cpu_online functions.
1042 *
1043 *      Iterate through all irqs and invoke the chip.irq_cpu_online()
1044 *      for each.
1045 */
1046void irq_cpu_online(void)
1047{
1048        struct irq_desc *desc;
1049        struct irq_chip *chip;
1050        unsigned long flags;
1051        unsigned int irq;
1052
1053        for_each_active_irq(irq) {
1054                desc = irq_to_desc(irq);
1055                if (!desc)
1056                        continue;
1057
1058                raw_spin_lock_irqsave(&desc->lock, flags);
1059
1060                chip = irq_data_get_irq_chip(&desc->irq_data);
1061                if (chip && chip->irq_cpu_online &&
1062                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1063                     !irqd_irq_disabled(&desc->irq_data)))
1064                        chip->irq_cpu_online(&desc->irq_data);
1065
1066                raw_spin_unlock_irqrestore(&desc->lock, flags);
1067        }
1068}
1069
1070/**
1071 *      irq_cpu_offline - Invoke all irq_cpu_offline functions.
1072 *
1073 *      Iterate through all irqs and invoke the chip.irq_cpu_offline()
1074 *      for each.
1075 */
1076void irq_cpu_offline(void)
1077{
1078        struct irq_desc *desc;
1079        struct irq_chip *chip;
1080        unsigned long flags;
1081        unsigned int irq;
1082
1083        for_each_active_irq(irq) {
1084                desc = irq_to_desc(irq);
1085                if (!desc)
1086                        continue;
1087
1088                raw_spin_lock_irqsave(&desc->lock, flags);
1089
1090                chip = irq_data_get_irq_chip(&desc->irq_data);
1091                if (chip && chip->irq_cpu_offline &&
1092                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1093                     !irqd_irq_disabled(&desc->irq_data)))
1094                        chip->irq_cpu_offline(&desc->irq_data);
1095
1096                raw_spin_unlock_irqrestore(&desc->lock, flags);
1097        }
1098}
1099
1100#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1101
1102#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1103/**
1104 *      handle_fasteoi_ack_irq - irq handler for edge hierarchy
1105 *      stacked on transparent controllers
1106 *
1107 *      @desc:  the interrupt description structure for this irq
1108 *
1109 *      Like handle_fasteoi_irq(), but for use with hierarchy where
1110 *      the irq_chip also needs to have its ->irq_ack() function
1111 *      called.
1112 */
1113void handle_fasteoi_ack_irq(struct irq_desc *desc)
1114{
1115        struct irq_chip *chip = desc->irq_data.chip;
1116
1117        raw_spin_lock(&desc->lock);
1118
1119        if (!irq_may_run(desc))
1120                goto out;
1121
1122        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1123
1124        /*
 1125         * If it's disabled or no action is available,
1126         * then mask it and get out of here:
1127         */
1128        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1129                desc->istate |= IRQS_PENDING;
1130                mask_irq(desc);
1131                goto out;
1132        }
1133
1134        kstat_incr_irqs_this_cpu(desc);
1135        if (desc->istate & IRQS_ONESHOT)
1136                mask_irq(desc);
1137
1138        /* Start handling the irq */
1139        desc->irq_data.chip->irq_ack(&desc->irq_data);
1140
1141        preflow_handler(desc);
1142        handle_irq_event(desc);
1143
1144        cond_unmask_eoi_irq(desc, chip);
1145
1146        raw_spin_unlock(&desc->lock);
1147        return;
1148out:
1149        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1150                chip->irq_eoi(&desc->irq_data);
1151        raw_spin_unlock(&desc->lock);
1152}
1153EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1154
1155/**
1156 *      handle_fasteoi_mask_irq - irq handler for level hierarchy
1157 *      stacked on transparent controllers
1158 *
1159 *      @desc:  the interrupt description structure for this irq
1160 *
1161 *      Like handle_fasteoi_irq(), but for use with hierarchy where
1162 *      the irq_chip also needs to have its ->irq_mask_ack() function
1163 *      called.
1164 */
1165void handle_fasteoi_mask_irq(struct irq_desc *desc)
1166{
1167        struct irq_chip *chip = desc->irq_data.chip;
1168
1169        raw_spin_lock(&desc->lock);
1170        mask_ack_irq(desc);
1171
1172        if (!irq_may_run(desc))
1173                goto out;
1174
1175        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1176
1177        /*
 1178         * If it's disabled or no action is available,
1179         * then mask it and get out of here:
1180         */
1181        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1182                desc->istate |= IRQS_PENDING;
1183                mask_irq(desc);
1184                goto out;
1185        }
1186
1187        kstat_incr_irqs_this_cpu(desc);
1188        if (desc->istate & IRQS_ONESHOT)
1189                mask_irq(desc);
1190
1191        preflow_handler(desc);
1192        handle_irq_event(desc);
1193
1194        cond_unmask_eoi_irq(desc, chip);
1195
1196        raw_spin_unlock(&desc->lock);
1197        return;
1198out:
1199        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1200                chip->irq_eoi(&desc->irq_data);
1201        raw_spin_unlock(&desc->lock);
1202}
1203EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1204
1205#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1206
1207/**
 1208 * irq_chip_enable_parent - Enable the parent interrupt (falls back to unmask
 1209 * if the parent chip has no irq_enable callback)
1210 * @data:       Pointer to interrupt specific data
1211 */
1212void irq_chip_enable_parent(struct irq_data *data)
1213{
1214        data = data->parent_data;
1215        if (data->chip->irq_enable)
1216                data->chip->irq_enable(data);
1217        else
1218                data->chip->irq_unmask(data);
1219}
1220EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1221
1222/**
 1223 * irq_chip_disable_parent - Disable the parent interrupt (falls back to mask
 1224 * if the parent chip has no irq_disable callback)
1225 * @data:       Pointer to interrupt specific data
1226 */
1227void irq_chip_disable_parent(struct irq_data *data)
1228{
1229        data = data->parent_data;
1230        if (data->chip->irq_disable)
1231                data->chip->irq_disable(data);
1232        else
1233                data->chip->irq_mask(data);
1234}
1235EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1236
1237/**
1238 * irq_chip_ack_parent - Acknowledge the parent interrupt
1239 * @data:       Pointer to interrupt specific data
1240 */
1241void irq_chip_ack_parent(struct irq_data *data)
1242{
1243        data = data->parent_data;
1244        data->chip->irq_ack(data);
1245}
1246EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1247
1248/**
1249 * irq_chip_mask_parent - Mask the parent interrupt
1250 * @data:       Pointer to interrupt specific data
1251 */
1252void irq_chip_mask_parent(struct irq_data *data)
1253{
1254        data = data->parent_data;
1255        data->chip->irq_mask(data);
1256}
1257EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1258
1259/**
1260 * irq_chip_unmask_parent - Unmask the parent interrupt
1261 * @data:       Pointer to interrupt specific data
1262 */
1263void irq_chip_unmask_parent(struct irq_data *data)
1264{
1265        data = data->parent_data;
1266        data->chip->irq_unmask(data);
1267}
1268EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1269
1270/**
1271 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1272 * @data:       Pointer to interrupt specific data
1273 */
1274void irq_chip_eoi_parent(struct irq_data *data)
1275{
1276        data = data->parent_data;
1277        data->chip->irq_eoi(data);
1278}
1279EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1280
1281/**
1282 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1283 * @data:       Pointer to interrupt specific data
1284 * @dest:       The affinity mask to set
1285 * @force:      Flag to enforce setting (disable online checks)
1286 *
 1287 * Conditional, as the underlying parent chip might not implement it.
1288 */
1289int irq_chip_set_affinity_parent(struct irq_data *data,
1290                                 const struct cpumask *dest, bool force)
1291{
1292        data = data->parent_data;
1293        if (data->chip->irq_set_affinity)
1294                return data->chip->irq_set_affinity(data, dest, force);
1295
1296        return -ENOSYS;
1297}
1298EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
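
/*
 * Example (illustrative sketch, not part of this file): an irq chip for a
 * stacked (hierarchical) domain that adds no behaviour of its own for most
 * operations can simply delegate them to its parent using the helpers above.
 * The "foo" chip is an assumption.
 */
static struct irq_chip foo_msi_chip = {
        .name                   = "foo-msi",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
};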
1299
1300/**
1301 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1302 * @data:       Pointer to interrupt specific data
1303 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1304 *
1305 * Conditional, as the underlying parent chip might not implement it.
1306 */
1307int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1308{
1309        data = data->parent_data;
1310
1311        if (data->chip->irq_set_type)
1312                return data->chip->irq_set_type(data, type);
1313
1314        return -ENOSYS;
1315}
1316EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1317
1318/**
1319 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1320 * @data:       Pointer to interrupt specific data
1321 *
1322 * Iterate through the domain hierarchy of the interrupt and check
1323 * whether a hw retrigger function exists. If yes, invoke it.
1324 */
1325int irq_chip_retrigger_hierarchy(struct irq_data *data)
1326{
1327        for (data = data->parent_data; data; data = data->parent_data)
1328                if (data->chip && data->chip->irq_retrigger)
1329                        return data->chip->irq_retrigger(data);
1330
1331        return 0;
1332}
1333
1334/**
1335 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1336 * @data:       Pointer to interrupt specific data
1337 * @vcpu_info:  The vcpu affinity information
1338 */
1339int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1340{
1341        data = data->parent_data;
1342        if (data->chip->irq_set_vcpu_affinity)
1343                return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1344
1345        return -ENOSYS;
1346}
1347
1348/**
1349 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1350 * @data:       Pointer to interrupt specific data
1351 * @on:         Whether to set or reset the wake-up capability of this irq
1352 *
1353 * Conditional, as the underlying parent chip might not implement it.
1354 */
1355int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1356{
1357        data = data->parent_data;
1358        if (data->chip->irq_set_wake)
1359                return data->chip->irq_set_wake(data, on);
1360
1361        return -ENOSYS;
1362}
1363#endif
1364
1365/**
 1366 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1367 * @data:       Pointer to interrupt specific data
1368 * @msg:        Pointer to the MSI message
1369 *
1370 * For hierarchical domains we find the first chip in the hierarchy
 1371 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 1372 * domains we use the top level chip.
1373 */
1374int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1375{
1376        struct irq_data *pos = NULL;
1377
1378#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1379        for (; data; data = data->parent_data)
1380#endif
1381                if (data->chip && data->chip->irq_compose_msi_msg)
1382                        pos = data;
1383        if (!pos)
1384                return -ENOSYS;
1385
1386        pos->chip->irq_compose_msi_msg(pos, msg);
1387
1388        return 0;
1389}
1390
1391/**
1392 * irq_chip_pm_get - Enable power for an IRQ chip
1393 * @data:       Pointer to interrupt specific data
1394 *
1395 * Enable the power to the IRQ chip referenced by the interrupt data
1396 * structure.
1397 */
1398int irq_chip_pm_get(struct irq_data *data)
1399{
1400        int retval;
1401
1402        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1403                retval = pm_runtime_get_sync(data->chip->parent_device);
1404                if (retval < 0) {
1405                        pm_runtime_put_noidle(data->chip->parent_device);
1406                        return retval;
1407                }
1408        }
1409
1410        return 0;
1411}
1412
1413/**
1414 * irq_chip_pm_put - Disable power for an IRQ chip
1415 * @data:       Pointer to interrupt specific data
1416 *
1417 * Disable the power to the IRQ chip referenced by the interrupt data
 1418 * structure. Note that power will only be disabled once this
1419 * function has been called for all IRQs that have called irq_chip_pm_get().
1420 */
1421int irq_chip_pm_put(struct irq_data *data)
1422{
1423        int retval = 0;
1424
1425        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1426                retval = pm_runtime_put(data->chip->parent_device);
1427
1428        return (retval < 0) ? retval : 0;
1429}
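
/*
 * Example (illustrative sketch, not part of this file): an irqchip whose
 * registers live in a power domain points chip->parent_device at its struct
 * device, so that the core calls irq_chip_pm_get()/irq_chip_pm_put() around
 * request_irq()/free_irq() and keeps the block powered while any of its
 * interrupts are in use. All "foo" names are assumptions.
 */
static struct irq_chip foo_pm_chip;     /* callbacks omitted in this sketch */

static int foo_intc_probe(struct platform_device *pdev)
{
        foo_pm_chip.parent_device = &pdev->dev;
        pm_runtime_enable(&pdev->dev);
        /* ... create the irq domain and map lines to &foo_pm_chip ... */
        return 0;
}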
1430