linux/kernel/irq/chip.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
   5 *
   6 * This file contains the core interrupt handling code, for irq-chip based
   7 * architectures. Detailed information is available in
   8 * Documentation/core-api/genericirq.rst
   9 */
  10
  11#include <linux/irq.h>
  12#include <linux/msi.h>
  13#include <linux/module.h>
  14#include <linux/interrupt.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/irqdomain.h>
  17
  18#include <trace/events/irq.h>
  19
  20#include "internals.h"
  21
  22static irqreturn_t bad_chained_irq(int irq, void *dev_id)
  23{
  24        WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
  25        return IRQ_NONE;
  26}
  27
  28/*
   29 * Chained handlers should never call the action on their IRQ. This default
   30 * action will emit a warning if that ever happens.
  31 */
  32struct irqaction chained_action = {
  33        .handler = bad_chained_irq,
  34};
  35
  36/**
  37 *      irq_set_chip - set the irq chip for an irq
  38 *      @irq:   irq number
  39 *      @chip:  pointer to irq chip description structure
  40 */
  41int irq_set_chip(unsigned int irq, struct irq_chip *chip)
  42{
  43        unsigned long flags;
  44        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  45
  46        if (!desc)
  47                return -EINVAL;
  48
  49        if (!chip)
  50                chip = &no_irq_chip;
  51
  52        desc->irq_data.chip = chip;
  53        irq_put_desc_unlock(desc, flags);
  54        /*
  55         * For !CONFIG_SPARSE_IRQ make the irq show up in
  56         * allocated_irqs.
  57         */
  58        irq_mark_irq(irq);
  59        return 0;
  60}
  61EXPORT_SYMBOL(irq_set_chip);
  62
  63/**
   64 *      irq_set_irq_type - set the irq trigger type for an irq
  65 *      @irq:   irq number
  66 *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  67 */
  68int irq_set_irq_type(unsigned int irq, unsigned int type)
  69{
  70        unsigned long flags;
  71        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  72        int ret = 0;
  73
  74        if (!desc)
  75                return -EINVAL;
  76
  77        ret = __irq_set_trigger(desc, type);
  78        irq_put_desc_busunlock(desc, flags);
  79        return ret;
  80}
  81EXPORT_SYMBOL(irq_set_irq_type);
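
/*
 * Illustrative sketch (not part of the original file): a hypothetical board
 * setup path configuring a line for rising-edge triggering before requesting
 * it. "example_handler", "example_dev" and the irq number are assumptions.
 *
 *	if (irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING))
 *		pr_warn("irq %u: could not set trigger type\n", irq);
 *
 *	ret = request_irq(irq, example_handler, 0, "example", example_dev);
 */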
  82
  83/**
  84 *      irq_set_handler_data - set irq handler data for an irq
  85 *      @irq:   Interrupt number
  86 *      @data:  Pointer to interrupt specific data
  87 *
  88 *      Set the hardware irq controller data for an irq
  89 */
  90int irq_set_handler_data(unsigned int irq, void *data)
  91{
  92        unsigned long flags;
  93        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  94
  95        if (!desc)
  96                return -EINVAL;
  97        desc->irq_common_data.handler_data = data;
  98        irq_put_desc_unlock(desc, flags);
  99        return 0;
 100}
 101EXPORT_SYMBOL(irq_set_handler_data);
 102
 103/**
 104 *      irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 105 *      @irq_base:      Interrupt number base
 106 *      @irq_offset:    Interrupt number offset
 107 *      @entry:         Pointer to MSI descriptor data
 108 *
 109 *      Set the MSI descriptor entry for an irq at offset
 110 */
 111int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
 112                         struct msi_desc *entry)
 113{
 114        unsigned long flags;
 115        struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 116
 117        if (!desc)
 118                return -EINVAL;
 119        desc->irq_common_data.msi_desc = entry;
 120        if (entry && !irq_offset)
 121                entry->irq = irq_base;
 122        irq_put_desc_unlock(desc, flags);
 123        return 0;
 124}
 125
 126/**
 127 *      irq_set_msi_desc - set MSI descriptor data for an irq
 128 *      @irq:   Interrupt number
 129 *      @entry: Pointer to MSI descriptor data
 130 *
 131 *      Set the MSI descriptor entry for an irq
 132 */
 133int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 134{
 135        return irq_set_msi_desc_off(irq, 0, entry);
 136}
 137
 138/**
 139 *      irq_set_chip_data - set irq chip data for an irq
 140 *      @irq:   Interrupt number
 141 *      @data:  Pointer to chip specific data
 142 *
 143 *      Set the hardware irq chip data for an irq
 144 */
 145int irq_set_chip_data(unsigned int irq, void *data)
 146{
 147        unsigned long flags;
 148        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 149
 150        if (!desc)
 151                return -EINVAL;
 152        desc->irq_data.chip_data = data;
 153        irq_put_desc_unlock(desc, flags);
 154        return 0;
 155}
 156EXPORT_SYMBOL(irq_set_chip_data);
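
/*
 * Illustrative sketch (not part of the original file): an irqchip driver
 * typically stores its per-controller state with irq_set_chip_data() (or via
 * its irqdomain map hook) and retrieves it in the chip callbacks with
 * irq_data_get_irq_chip_data(). "struct example_chip" and EXAMPLE_MASK_REG
 * are assumptions.
 *
 *	static void example_mask(struct irq_data *d)
 *	{
 *		struct example_chip *ec = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), ec->base + EXAMPLE_MASK_REG);
 *	}
 */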
 157
 158struct irq_data *irq_get_irq_data(unsigned int irq)
 159{
 160        struct irq_desc *desc = irq_to_desc(irq);
 161
 162        return desc ? &desc->irq_data : NULL;
 163}
 164EXPORT_SYMBOL_GPL(irq_get_irq_data);
 165
 166static void irq_state_clr_disabled(struct irq_desc *desc)
 167{
 168        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 169}
 170
 171static void irq_state_clr_masked(struct irq_desc *desc)
 172{
 173        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 174}
 175
 176static void irq_state_clr_started(struct irq_desc *desc)
 177{
 178        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
 179}
 180
 181static void irq_state_set_started(struct irq_desc *desc)
 182{
 183        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
 184}
 185
 186enum {
 187        IRQ_STARTUP_NORMAL,
 188        IRQ_STARTUP_MANAGED,
 189        IRQ_STARTUP_ABORT,
 190};
 191
 192#ifdef CONFIG_SMP
 193static int
 194__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 195{
 196        struct irq_data *d = irq_desc_get_irq_data(desc);
 197
 198        if (!irqd_affinity_is_managed(d))
 199                return IRQ_STARTUP_NORMAL;
 200
 201        irqd_clr_managed_shutdown(d);
 202
 203        if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
 204                /*
 205                 * Catch code which fiddles with enable_irq() on a managed
 206                 * and potentially shutdown IRQ. Chained interrupt
 207                 * installment or irq auto probing should not happen on
 208                 * managed irqs either.
 209                 */
 210                if (WARN_ON_ONCE(force))
 211                        return IRQ_STARTUP_ABORT;
 212                /*
 213                 * The interrupt was requested, but there is no online CPU
  214                 * in its affinity mask. Put it into managed shutdown
 215                 * state and let the cpu hotplug mechanism start it up once
 216                 * a CPU in the mask becomes available.
 217                 */
 218                return IRQ_STARTUP_ABORT;
 219        }
 220        /*
 221         * Managed interrupts have reserved resources, so this should not
 222         * happen.
 223         */
 224        if (WARN_ON(irq_domain_activate_irq(d, false)))
 225                return IRQ_STARTUP_ABORT;
 226        return IRQ_STARTUP_MANAGED;
 227}
 228#else
 229static __always_inline int
 230__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 231{
 232        return IRQ_STARTUP_NORMAL;
 233}
 234#endif
 235
 236static int __irq_startup(struct irq_desc *desc)
 237{
 238        struct irq_data *d = irq_desc_get_irq_data(desc);
 239        int ret = 0;
 240
 241        /* Warn if this interrupt is not activated but try nevertheless */
 242        WARN_ON_ONCE(!irqd_is_activated(d));
 243
 244        if (d->chip->irq_startup) {
 245                ret = d->chip->irq_startup(d);
 246                irq_state_clr_disabled(desc);
 247                irq_state_clr_masked(desc);
 248        } else {
 249                irq_enable(desc);
 250        }
 251        irq_state_set_started(desc);
 252        return ret;
 253}
 254
 255int irq_startup(struct irq_desc *desc, bool resend, bool force)
 256{
 257        struct irq_data *d = irq_desc_get_irq_data(desc);
 258        struct cpumask *aff = irq_data_get_affinity_mask(d);
 259        int ret = 0;
 260
 261        desc->depth = 0;
 262
 263        if (irqd_is_started(d)) {
 264                irq_enable(desc);
 265        } else {
 266                switch (__irq_startup_managed(desc, aff, force)) {
 267                case IRQ_STARTUP_NORMAL:
 268                        ret = __irq_startup(desc);
 269                        irq_setup_affinity(desc);
 270                        break;
 271                case IRQ_STARTUP_MANAGED:
 272                        irq_do_set_affinity(d, aff, false);
 273                        ret = __irq_startup(desc);
 274                        break;
 275                case IRQ_STARTUP_ABORT:
 276                        irqd_set_managed_shutdown(d);
 277                        return 0;
 278                }
 279        }
 280        if (resend)
 281                check_irq_resend(desc);
 282
 283        return ret;
 284}
 285
 286int irq_activate(struct irq_desc *desc)
 287{
 288        struct irq_data *d = irq_desc_get_irq_data(desc);
 289
 290        if (!irqd_affinity_is_managed(d))
 291                return irq_domain_activate_irq(d, false);
 292        return 0;
 293}
 294
 295int irq_activate_and_startup(struct irq_desc *desc, bool resend)
 296{
 297        if (WARN_ON(irq_activate(desc)))
 298                return 0;
 299        return irq_startup(desc, resend, IRQ_START_FORCE);
 300}
 301
 302static void __irq_disable(struct irq_desc *desc, bool mask);
 303
 304void irq_shutdown(struct irq_desc *desc)
 305{
 306        if (irqd_is_started(&desc->irq_data)) {
 307                desc->depth = 1;
 308                if (desc->irq_data.chip->irq_shutdown) {
 309                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 310                        irq_state_set_disabled(desc);
 311                        irq_state_set_masked(desc);
 312                } else {
 313                        __irq_disable(desc, true);
 314                }
 315                irq_state_clr_started(desc);
 316        }
 317}
 318
 319
 320void irq_shutdown_and_deactivate(struct irq_desc *desc)
 321{
 322        irq_shutdown(desc);
 323        /*
 324         * This must be called even if the interrupt was never started up,
 325         * because the activation can happen before the interrupt is
  326 * available for request/startup. It has its own state tracking so
 327         * it's safe to call it unconditionally.
 328         */
 329        irq_domain_deactivate_irq(&desc->irq_data);
 330}
 331
 332void irq_enable(struct irq_desc *desc)
 333{
 334        if (!irqd_irq_disabled(&desc->irq_data)) {
 335                unmask_irq(desc);
 336        } else {
 337                irq_state_clr_disabled(desc);
 338                if (desc->irq_data.chip->irq_enable) {
 339                        desc->irq_data.chip->irq_enable(&desc->irq_data);
 340                        irq_state_clr_masked(desc);
 341                } else {
 342                        unmask_irq(desc);
 343                }
 344        }
 345}
 346
 347static void __irq_disable(struct irq_desc *desc, bool mask)
 348{
 349        if (irqd_irq_disabled(&desc->irq_data)) {
 350                if (mask)
 351                        mask_irq(desc);
 352        } else {
 353                irq_state_set_disabled(desc);
 354                if (desc->irq_data.chip->irq_disable) {
 355                        desc->irq_data.chip->irq_disable(&desc->irq_data);
 356                        irq_state_set_masked(desc);
 357                } else if (mask) {
 358                        mask_irq(desc);
 359                }
 360        }
 361}
 362
 363/**
 364 * irq_disable - Mark interrupt disabled
 365 * @desc:       irq descriptor which should be disabled
 366 *
 367 * If the chip does not implement the irq_disable callback, we
 368 * use a lazy disable approach. That means we mark the interrupt
 369 * disabled, but leave the hardware unmasked. That's an
 370 * optimization because we avoid the hardware access for the
 371 * common case where no interrupt happens after we marked it
 372 * disabled. If an interrupt happens, then the interrupt flow
 373 * handler masks the line at the hardware level and marks it
 374 * pending.
 375 *
 376 * If the interrupt chip does not implement the irq_disable callback,
 377 * a driver can disable the lazy approach for a particular irq line by
 378 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 379 * be used for devices which cannot disable the interrupt at the
 380 * device level under certain circumstances and have to use
 381 * disable_irq[_nosync] instead.
 382 */
 383void irq_disable(struct irq_desc *desc)
 384{
 385        __irq_disable(desc, irq_settings_disable_unlazy(desc));
 386}
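
/*
 * Illustrative sketch (not part of the original file): a driver whose device
 * cannot gate the interrupt at the device level can opt out of the lazy
 * disable optimization described above before relying on disable_irq():
 *
 *	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 *	disable_irq(irq);
 */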
 387
 388void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
 389{
 390        if (desc->irq_data.chip->irq_enable)
 391                desc->irq_data.chip->irq_enable(&desc->irq_data);
 392        else
 393                desc->irq_data.chip->irq_unmask(&desc->irq_data);
 394        cpumask_set_cpu(cpu, desc->percpu_enabled);
 395}
 396
 397void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 398{
 399        if (desc->irq_data.chip->irq_disable)
 400                desc->irq_data.chip->irq_disable(&desc->irq_data);
 401        else
 402                desc->irq_data.chip->irq_mask(&desc->irq_data);
 403        cpumask_clear_cpu(cpu, desc->percpu_enabled);
 404}
 405
 406static inline void mask_ack_irq(struct irq_desc *desc)
 407{
 408        if (desc->irq_data.chip->irq_mask_ack) {
 409                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 410                irq_state_set_masked(desc);
 411        } else {
 412                mask_irq(desc);
 413                if (desc->irq_data.chip->irq_ack)
 414                        desc->irq_data.chip->irq_ack(&desc->irq_data);
 415        }
 416}
 417
 418void mask_irq(struct irq_desc *desc)
 419{
 420        if (irqd_irq_masked(&desc->irq_data))
 421                return;
 422
 423        if (desc->irq_data.chip->irq_mask) {
 424                desc->irq_data.chip->irq_mask(&desc->irq_data);
 425                irq_state_set_masked(desc);
 426        }
 427}
 428
 429void unmask_irq(struct irq_desc *desc)
 430{
 431        if (!irqd_irq_masked(&desc->irq_data))
 432                return;
 433
 434        if (desc->irq_data.chip->irq_unmask) {
 435                desc->irq_data.chip->irq_unmask(&desc->irq_data);
 436                irq_state_clr_masked(desc);
 437        }
 438}
 439
 440void unmask_threaded_irq(struct irq_desc *desc)
 441{
 442        struct irq_chip *chip = desc->irq_data.chip;
 443
 444        if (chip->flags & IRQCHIP_EOI_THREADED)
 445                chip->irq_eoi(&desc->irq_data);
 446
 447        unmask_irq(desc);
 448}
 449
 450/*
  451 *      handle_nested_irq - Handle a nested irq from an irq thread
  452 *      @irq:   the interrupt number
  453 *
  454 *      Handle interrupts which are nested into a threaded interrupt
  455 *      handler. The handler function is called inside the calling
  456 *      thread's context.
 457 */
 458void handle_nested_irq(unsigned int irq)
 459{
 460        struct irq_desc *desc = irq_to_desc(irq);
 461        struct irqaction *action;
 462        irqreturn_t action_ret;
 463
 464        might_sleep();
 465
 466        raw_spin_lock_irq(&desc->lock);
 467
 468        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 469
 470        action = desc->action;
 471        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
 472                desc->istate |= IRQS_PENDING;
 473                goto out_unlock;
 474        }
 475
 476        kstat_incr_irqs_this_cpu(desc);
 477        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 478        raw_spin_unlock_irq(&desc->lock);
 479
 480        action_ret = IRQ_NONE;
 481        for_each_action_of_desc(desc, action)
 482                action_ret |= action->thread_fn(action->irq, action->dev_id);
 483
 484        if (!noirqdebug)
 485                note_interrupt(desc, action_ret);
 486
 487        raw_spin_lock_irq(&desc->lock);
 488        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 489
 490out_unlock:
 491        raw_spin_unlock_irq(&desc->lock);
 492}
 493EXPORT_SYMBOL_GPL(handle_nested_irq);
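
/*
 * Illustrative sketch (not part of the original file): a hypothetical I2C GPIO
 * expander can only be demultiplexed from a threaded handler because the bus
 * access sleeps, so it calls handle_nested_irq() for each pending child in the
 * thread's context. example_read_status() and struct example_expander are
 * assumptions.
 *
 *	static irqreturn_t example_expander_thread(int irq, void *data)
 *	{
 *		struct example_expander *ex = data;
 *		unsigned long pending = example_read_status(ex);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, ex->ngpio)
 *			handle_nested_irq(irq_find_mapping(ex->domain, bit));
 *
 *		return IRQ_HANDLED;
 *	}
 */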
 494
 495static bool irq_check_poll(struct irq_desc *desc)
 496{
 497        if (!(desc->istate & IRQS_POLL_INPROGRESS))
 498                return false;
 499        return irq_wait_for_poll(desc);
 500}
 501
 502static bool irq_may_run(struct irq_desc *desc)
 503{
 504        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 505
 506        /*
 507         * If the interrupt is not in progress and is not an armed
 508         * wakeup interrupt, proceed.
 509         */
 510        if (!irqd_has_set(&desc->irq_data, mask))
 511                return true;
 512
 513        /*
 514         * If the interrupt is an armed wakeup source, mark it pending
 515         * and suspended, disable it and notify the pm core about the
 516         * event.
 517         */
 518        if (irq_pm_check_wakeup(desc))
 519                return false;
 520
 521        /*
 522         * Handle a potential concurrent poll on a different core.
 523         */
 524        return irq_check_poll(desc);
 525}
 526
 527/**
 528 *      handle_simple_irq - Simple and software-decoded IRQs.
 529 *      @desc:  the interrupt description structure for this irq
 530 *
 531 *      Simple interrupts are either sent from a demultiplexing interrupt
 532 *      handler or come from hardware, where no interrupt hardware control
 533 *      is necessary.
 534 *
 535 *      Note: The caller is expected to handle the ack, clear, mask and
 536 *      unmask issues if necessary.
 537 */
 538void handle_simple_irq(struct irq_desc *desc)
 539{
 540        raw_spin_lock(&desc->lock);
 541
 542        if (!irq_may_run(desc))
 543                goto out_unlock;
 544
 545        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 546
 547        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 548                desc->istate |= IRQS_PENDING;
 549                goto out_unlock;
 550        }
 551
 552        kstat_incr_irqs_this_cpu(desc);
 553        handle_irq_event(desc);
 554
 555out_unlock:
 556        raw_spin_unlock(&desc->lock);
 557}
 558EXPORT_SYMBOL_GPL(handle_simple_irq);
 559
 560/**
 561 *      handle_untracked_irq - Simple and software-decoded IRQs.
 562 *      @desc:  the interrupt description structure for this irq
 563 *
 564 *      Untracked interrupts are sent from a demultiplexing interrupt
  565 *      handler when the demultiplexer does not know which device in its
  566 *      multiplexed irq domain generated the interrupt. IRQs handled
 567 *      through here are not subjected to stats tracking, randomness, or
 568 *      spurious interrupt detection.
 569 *
 570 *      Note: Like handle_simple_irq, the caller is expected to handle
 571 *      the ack, clear, mask and unmask issues if necessary.
 572 */
 573void handle_untracked_irq(struct irq_desc *desc)
 574{
 575        unsigned int flags = 0;
 576
 577        raw_spin_lock(&desc->lock);
 578
 579        if (!irq_may_run(desc))
 580                goto out_unlock;
 581
 582        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 583
 584        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 585                desc->istate |= IRQS_PENDING;
 586                goto out_unlock;
 587        }
 588
 589        desc->istate &= ~IRQS_PENDING;
 590        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 591        raw_spin_unlock(&desc->lock);
 592
 593        __handle_irq_event_percpu(desc, &flags);
 594
 595        raw_spin_lock(&desc->lock);
 596        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 597
 598out_unlock:
 599        raw_spin_unlock(&desc->lock);
 600}
 601EXPORT_SYMBOL_GPL(handle_untracked_irq);
 602
 603/*
 604 * Called unconditionally from handle_level_irq() and only for oneshot
 605 * interrupts from handle_fasteoi_irq()
 606 */
 607static void cond_unmask_irq(struct irq_desc *desc)
 608{
 609        /*
 610         * We need to unmask in the following cases:
 611         * - Standard level irq (IRQF_ONESHOT is not set)
 612         * - Oneshot irq which did not wake the thread (caused by a
 613         *   spurious interrupt or a primary handler handling it
 614         *   completely).
 615         */
 616        if (!irqd_irq_disabled(&desc->irq_data) &&
 617            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
 618                unmask_irq(desc);
 619}
 620
 621/**
 622 *      handle_level_irq - Level type irq handler
 623 *      @desc:  the interrupt description structure for this irq
 624 *
 625 *      Level type interrupts are active as long as the hardware line has
  626 *      the active level. This may require masking the interrupt and unmasking
 627 *      it after the associated handler has acknowledged the device, so the
 628 *      interrupt line is back to inactive.
 629 */
 630void handle_level_irq(struct irq_desc *desc)
 631{
 632        raw_spin_lock(&desc->lock);
 633        mask_ack_irq(desc);
 634
 635        if (!irq_may_run(desc))
 636                goto out_unlock;
 637
 638        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 639
 640        /*
  641         * If it's disabled or no action is available,
  642         * keep it masked and get out of here
 643         */
 644        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 645                desc->istate |= IRQS_PENDING;
 646                goto out_unlock;
 647        }
 648
 649        kstat_incr_irqs_this_cpu(desc);
 650        handle_irq_event(desc);
 651
 652        cond_unmask_irq(desc);
 653
 654out_unlock:
 655        raw_spin_unlock(&desc->lock);
 656}
 657EXPORT_SYMBOL_GPL(handle_level_irq);
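
/*
 * Illustrative sketch (not part of the original file): a minimal memory-mapped
 * level-type controller pairs its irq_chip with handle_level_irq() when
 * mapping a line. All "example_*" identifiers are assumptions.
 *
 *	static struct irq_chip example_level_chip = {
 *		.name		= "example-level",
 *		.irq_ack	= example_ack,
 *		.irq_mask	= example_mask,
 *		.irq_unmask	= example_unmask,
 *	};
 *
 *	static int example_map(struct irq_domain *d, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &example_level_chip,
 *					 handle_level_irq);
 *		irq_set_chip_data(virq, d->host_data);
 *		return 0;
 *	}
 */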
 658
 659#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
 660static inline void preflow_handler(struct irq_desc *desc)
 661{
 662        if (desc->preflow_handler)
 663                desc->preflow_handler(&desc->irq_data);
 664}
 665#else
 666static inline void preflow_handler(struct irq_desc *desc) { }
 667#endif
 668
 669static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
 670{
 671        if (!(desc->istate & IRQS_ONESHOT)) {
 672                chip->irq_eoi(&desc->irq_data);
 673                return;
 674        }
 675        /*
 676         * We need to unmask in the following cases:
 677         * - Oneshot irq which did not wake the thread (caused by a
 678         *   spurious interrupt or a primary handler handling it
 679         *   completely).
 680         */
 681        if (!irqd_irq_disabled(&desc->irq_data) &&
 682            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
 683                chip->irq_eoi(&desc->irq_data);
 684                unmask_irq(desc);
 685        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
 686                chip->irq_eoi(&desc->irq_data);
 687        }
 688}
 689
 690/**
 691 *      handle_fasteoi_irq - irq handler for transparent controllers
 692 *      @desc:  the interrupt description structure for this irq
 693 *
 694 *      Only a single callback will be issued to the chip: an ->eoi()
 695 *      call when the interrupt has been serviced. This enables support
 696 *      for modern forms of interrupt handlers, which handle the flow
 697 *      details in hardware, transparently.
 698 */
 699void handle_fasteoi_irq(struct irq_desc *desc)
 700{
 701        struct irq_chip *chip = desc->irq_data.chip;
 702
 703        raw_spin_lock(&desc->lock);
 704
 705        if (!irq_may_run(desc))
 706                goto out;
 707
 708        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 709
 710        /*
  711         * If it's disabled or no action is available,
  712         * then mask it and get out of here:
 713         */
 714        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 715                desc->istate |= IRQS_PENDING;
 716                mask_irq(desc);
 717                goto out;
 718        }
 719
 720        kstat_incr_irqs_this_cpu(desc);
 721        if (desc->istate & IRQS_ONESHOT)
 722                mask_irq(desc);
 723
 724        preflow_handler(desc);
 725        handle_irq_event(desc);
 726
 727        cond_unmask_eoi_irq(desc, chip);
 728
 729        raw_spin_unlock(&desc->lock);
 730        return;
 731out:
 732        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 733                chip->irq_eoi(&desc->irq_data);
 734        raw_spin_unlock(&desc->lock);
 735}
 736EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 737
 738/**
 739 *      handle_fasteoi_nmi - irq handler for NMI interrupt lines
 740 *      @desc:  the interrupt description structure for this irq
 741 *
 742 *      A simple NMI-safe handler, considering the restrictions
 743 *      from request_nmi.
 744 *
 745 *      Only a single callback will be issued to the chip: an ->eoi()
 746 *      call when the interrupt has been serviced. This enables support
 747 *      for modern forms of interrupt handlers, which handle the flow
 748 *      details in hardware, transparently.
 749 */
 750void handle_fasteoi_nmi(struct irq_desc *desc)
 751{
 752        struct irq_chip *chip = irq_desc_get_chip(desc);
 753        struct irqaction *action = desc->action;
 754        unsigned int irq = irq_desc_get_irq(desc);
 755        irqreturn_t res;
 756
 757        __kstat_incr_irqs_this_cpu(desc);
 758
 759        trace_irq_handler_entry(irq, action);
 760        /*
 761         * NMIs cannot be shared, there is only one action.
 762         */
 763        res = action->handler(irq, action->dev_id);
 764        trace_irq_handler_exit(irq, action, res);
 765
 766        if (chip->irq_eoi)
 767                chip->irq_eoi(&desc->irq_data);
 768}
 769EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
 770
 771/**
 772 *      handle_edge_irq - edge type IRQ handler
 773 *      @desc:  the interrupt description structure for this irq
 774 *
  775 *      An interrupt occurs on the falling and/or rising edge of a hardware
 776 *      signal. The occurrence is latched into the irq controller hardware
 777 *      and must be acked in order to be reenabled. After the ack another
 778 *      interrupt can happen on the same source even before the first one
 779 *      is handled by the associated event handler. If this happens it
 780 *      might be necessary to disable (mask) the interrupt depending on the
  781 *      controller hardware. This requires re-enabling the interrupt inside
  782 *      the loop which handles the interrupts that have arrived while
 783 *      the handler was running. If all pending interrupts are handled, the
 784 *      loop is left.
 785 */
 786void handle_edge_irq(struct irq_desc *desc)
 787{
 788        raw_spin_lock(&desc->lock);
 789
 790        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 791
 792        if (!irq_may_run(desc)) {
 793                desc->istate |= IRQS_PENDING;
 794                mask_ack_irq(desc);
 795                goto out_unlock;
 796        }
 797
 798        /*
  799         * If it's disabled or no action is available then mask it and get
 800         * out of here.
 801         */
 802        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 803                desc->istate |= IRQS_PENDING;
 804                mask_ack_irq(desc);
 805                goto out_unlock;
 806        }
 807
 808        kstat_incr_irqs_this_cpu(desc);
 809
 810        /* Start handling the irq */
 811        desc->irq_data.chip->irq_ack(&desc->irq_data);
 812
 813        do {
 814                if (unlikely(!desc->action)) {
 815                        mask_irq(desc);
 816                        goto out_unlock;
 817                }
 818
 819                /*
 820                 * When another irq arrived while we were handling
 821                 * one, we could have masked the irq.
  822                 * Re-enable it, if it was not disabled in the meantime.
 823                 */
 824                if (unlikely(desc->istate & IRQS_PENDING)) {
 825                        if (!irqd_irq_disabled(&desc->irq_data) &&
 826                            irqd_irq_masked(&desc->irq_data))
 827                                unmask_irq(desc);
 828                }
 829
 830                handle_irq_event(desc);
 831
 832        } while ((desc->istate & IRQS_PENDING) &&
 833                 !irqd_irq_disabled(&desc->irq_data));
 834
 835out_unlock:
 836        raw_spin_unlock(&desc->lock);
 837}
 838EXPORT_SYMBOL(handle_edge_irq);
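
/*
 * Illustrative sketch (not part of the original file): controllers which
 * support both trigger modes commonly switch the flow handler from their
 * irq_set_type() callback. example_hw_set_type() is an assumption.
 *
 *	static int example_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (type & IRQ_TYPE_LEVEL_MASK)
 *			irq_set_handler_locked(d, handle_level_irq);
 *		else
 *			irq_set_handler_locked(d, handle_edge_irq);
 *
 *		return example_hw_set_type(d, type);
 *	}
 */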
 839
 840#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 841/**
 842 *      handle_edge_eoi_irq - edge eoi type IRQ handler
 843 *      @desc:  the interrupt description structure for this irq
 844 *
  845 * Similar to handle_edge_irq above, but using eoi and without the
  846 * mask/unmask logic.
 847 */
 848void handle_edge_eoi_irq(struct irq_desc *desc)
 849{
 850        struct irq_chip *chip = irq_desc_get_chip(desc);
 851
 852        raw_spin_lock(&desc->lock);
 853
 854        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 855
 856        if (!irq_may_run(desc)) {
 857                desc->istate |= IRQS_PENDING;
 858                goto out_eoi;
 859        }
 860
 861        /*
  862         * If it's disabled or no action is available then mask it and get
 863         * out of here.
 864         */
 865        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 866                desc->istate |= IRQS_PENDING;
 867                goto out_eoi;
 868        }
 869
 870        kstat_incr_irqs_this_cpu(desc);
 871
 872        do {
 873                if (unlikely(!desc->action))
 874                        goto out_eoi;
 875
 876                handle_irq_event(desc);
 877
 878        } while ((desc->istate & IRQS_PENDING) &&
 879                 !irqd_irq_disabled(&desc->irq_data));
 880
 881out_eoi:
 882        chip->irq_eoi(&desc->irq_data);
 883        raw_spin_unlock(&desc->lock);
 884}
 885#endif
 886
 887/**
 888 *      handle_percpu_irq - Per CPU local irq handler
 889 *      @desc:  the interrupt description structure for this irq
 890 *
 891 *      Per CPU interrupts on SMP machines without locking requirements
 892 */
 893void handle_percpu_irq(struct irq_desc *desc)
 894{
 895        struct irq_chip *chip = irq_desc_get_chip(desc);
 896
 897        /*
 898         * PER CPU interrupts are not serialized. Do not touch
 899         * desc->tot_count.
 900         */
 901        __kstat_incr_irqs_this_cpu(desc);
 902
 903        if (chip->irq_ack)
 904                chip->irq_ack(&desc->irq_data);
 905
 906        handle_irq_event_percpu(desc);
 907
 908        if (chip->irq_eoi)
 909                chip->irq_eoi(&desc->irq_data);
 910}
 911
 912/**
 913 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 914 * @desc:       the interrupt description structure for this irq
 915 *
 916 * Per CPU interrupts on SMP machines without locking requirements. Same as
 917 * handle_percpu_irq() above but with the following extras:
 918 *
 919 * action->percpu_dev_id is a pointer to percpu variables which
 920 * contain the real device id for the cpu on which this handler is
 921 * called
 922 */
 923void handle_percpu_devid_irq(struct irq_desc *desc)
 924{
 925        struct irq_chip *chip = irq_desc_get_chip(desc);
 926        struct irqaction *action = desc->action;
 927        unsigned int irq = irq_desc_get_irq(desc);
 928        irqreturn_t res;
 929
 930        /*
 931         * PER CPU interrupts are not serialized. Do not touch
 932         * desc->tot_count.
 933         */
 934        __kstat_incr_irqs_this_cpu(desc);
 935
 936        if (chip->irq_ack)
 937                chip->irq_ack(&desc->irq_data);
 938
 939        if (likely(action)) {
 940                trace_irq_handler_entry(irq, action);
 941                res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 942                trace_irq_handler_exit(irq, action, res);
 943        } else {
 944                unsigned int cpu = smp_processor_id();
 945                bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
 946
 947                if (enabled)
 948                        irq_percpu_disable(desc, cpu);
 949
 950                pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
 951                            enabled ? " and unmasked" : "", irq, cpu);
 952        }
 953
 954        if (chip->irq_eoi)
 955                chip->irq_eoi(&desc->irq_data);
 956}
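
/*
 * Illustrative sketch (not part of the original file): per-CPU dev_id
 * interrupts are requested once with request_percpu_irq() and then enabled on
 * each CPU. "example_timer_handler" and the percpu variable
 * "example_timer_evt" are assumptions; a hypothetical timer driver might do:
 *
 *	err = request_percpu_irq(irq, example_timer_handler, "example-timer",
 *				 &example_timer_evt);
 *
 * and, on each CPU (e.g. from a CPU hotplug callback):
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */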
 957
 958/**
 959 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 960 *                                   dev ids
 961 * @desc:       the interrupt description structure for this irq
 962 *
 963 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 964 * as a percpu pointer.
 965 */
 966void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
 967{
 968        struct irq_chip *chip = irq_desc_get_chip(desc);
 969        struct irqaction *action = desc->action;
 970        unsigned int irq = irq_desc_get_irq(desc);
 971        irqreturn_t res;
 972
 973        __kstat_incr_irqs_this_cpu(desc);
 974
 975        trace_irq_handler_entry(irq, action);
 976        res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 977        trace_irq_handler_exit(irq, action, res);
 978
 979        if (chip->irq_eoi)
 980                chip->irq_eoi(&desc->irq_data);
 981}
 982
 983static void
 984__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 985                     int is_chained, const char *name)
 986{
 987        if (!handle) {
 988                handle = handle_bad_irq;
 989        } else {
 990                struct irq_data *irq_data = &desc->irq_data;
 991#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 992                /*
 993                 * With hierarchical domains we might run into a
 994                 * situation where the outermost chip is not yet set
 995                 * up, but the inner chips are there.  Instead of
 996                 * bailing we install the handler, but obviously we
 997                 * cannot enable/startup the interrupt at this point.
 998                 */
 999                while (irq_data) {
1000                        if (irq_data->chip != &no_irq_chip)
1001                                break;
1002                        /*
1003                         * Bail out if the outer chip is not set up
1004                         * and the interrupt supposed to be started
1005                         * right away.
1006                         */
1007                        if (WARN_ON(is_chained))
1008                                return;
1009                        /* Try the parent */
1010                        irq_data = irq_data->parent_data;
1011                }
1012#endif
1013                if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1014                        return;
1015        }
1016
1017        /* Uninstall? */
1018        if (handle == handle_bad_irq) {
1019                if (desc->irq_data.chip != &no_irq_chip)
1020                        mask_ack_irq(desc);
1021                irq_state_set_disabled(desc);
1022                if (is_chained)
1023                        desc->action = NULL;
1024                desc->depth = 1;
1025        }
1026        desc->handle_irq = handle;
1027        desc->name = name;
1028
1029        if (handle != handle_bad_irq && is_chained) {
1030                unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1031
1032                /*
1033                 * We're about to start this interrupt immediately,
1034                 * hence the need to set the trigger configuration.
1035                 * But the .set_type callback may have overridden the
1036                 * flow handler, ignoring that we're dealing with a
1037                 * chained interrupt. Reset it immediately because we
1038                 * do know better.
1039                 */
1040                if (type != IRQ_TYPE_NONE) {
1041                        __irq_set_trigger(desc, type);
1042                        desc->handle_irq = handle;
1043                }
1044
1045                irq_settings_set_noprobe(desc);
1046                irq_settings_set_norequest(desc);
1047                irq_settings_set_nothread(desc);
1048                desc->action = &chained_action;
1049                irq_activate_and_startup(desc, IRQ_RESEND);
1050        }
1051}
1052
1053void
1054__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1055                  const char *name)
1056{
1057        unsigned long flags;
1058        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1059
1060        if (!desc)
1061                return;
1062
1063        __irq_do_set_handler(desc, handle, is_chained, name);
1064        irq_put_desc_busunlock(desc, flags);
1065}
1066EXPORT_SYMBOL_GPL(__irq_set_handler);
1067
1068void
1069irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1070                                 void *data)
1071{
1072        unsigned long flags;
1073        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1074
1075        if (!desc)
1076                return;
1077
1078        desc->irq_common_data.handler_data = data;
1079        __irq_do_set_handler(desc, handle, 1, NULL);
1080
1081        irq_put_desc_busunlock(desc, flags);
1082}
1083EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
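
/*
 * Illustrative sketch (not part of the original file): a GPIO bank driver
 * hooking its demultiplexing flow handler into the parent interrupt. The
 * "example_*" identifiers, EXAMPLE_PENDING_REG, parent_irq and bank are
 * assumptions; chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>.
 *
 *	static void example_bank_demux(struct irq_desc *desc)
 *	{
 *		struct example_bank *bank = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(bank->base + EXAMPLE_PENDING_REG);
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(irq_find_mapping(bank->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, example_bank_demux, bank);
 */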
1084
1085void
1086irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
1087                              irq_flow_handler_t handle, const char *name)
1088{
1089        irq_set_chip(irq, chip);
1090        __irq_set_handler(irq, handle, 0, name);
1091}
1092EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
1093
1094void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1095{
1096        unsigned long flags, trigger, tmp;
1097        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1098
1099        if (!desc)
1100                return;
1101
1102        /*
1103         * Warn when a driver sets the no autoenable flag on an already
1104         * active interrupt.
1105         */
1106        WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1107
1108        irq_settings_clr_and_set(desc, clr, set);
1109
1110        trigger = irqd_get_trigger_type(&desc->irq_data);
1111
1112        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1113                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1114        if (irq_settings_has_no_balance_set(desc))
1115                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1116        if (irq_settings_is_per_cpu(desc))
1117                irqd_set(&desc->irq_data, IRQD_PER_CPU);
1118        if (irq_settings_can_move_pcntxt(desc))
1119                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1120        if (irq_settings_is_level(desc))
1121                irqd_set(&desc->irq_data, IRQD_LEVEL);
1122
1123        tmp = irq_settings_get_trigger_mask(desc);
1124        if (tmp != IRQ_TYPE_NONE)
1125                trigger = tmp;
1126
1127        irqd_set(&desc->irq_data, trigger);
1128
1129        irq_put_desc_unlock(desc, flags);
1130}
1131EXPORT_SYMBOL_GPL(irq_modify_status);
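
/*
 * Illustrative sketch (not part of the original file): irq_set_status_flags()
 * and irq_clear_status_flags() in <linux/irq.h> are thin wrappers around this
 * function. A hypothetical driver marking a line as not suitable for
 * autoprobing and not requestable by other drivers might do:
 *
 *	irq_modify_status(irq, 0, IRQ_NOPROBE | IRQ_NOREQUEST);
 */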
1132
1133/**
1134 *      irq_cpu_online - Invoke all irq_cpu_online functions.
1135 *
1136 *      Iterate through all irqs and invoke the chip.irq_cpu_online()
1137 *      for each.
1138 */
1139void irq_cpu_online(void)
1140{
1141        struct irq_desc *desc;
1142        struct irq_chip *chip;
1143        unsigned long flags;
1144        unsigned int irq;
1145
1146        for_each_active_irq(irq) {
1147                desc = irq_to_desc(irq);
1148                if (!desc)
1149                        continue;
1150
1151                raw_spin_lock_irqsave(&desc->lock, flags);
1152
1153                chip = irq_data_get_irq_chip(&desc->irq_data);
1154                if (chip && chip->irq_cpu_online &&
1155                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1156                     !irqd_irq_disabled(&desc->irq_data)))
1157                        chip->irq_cpu_online(&desc->irq_data);
1158
1159                raw_spin_unlock_irqrestore(&desc->lock, flags);
1160        }
1161}
1162
1163/**
1164 *      irq_cpu_offline - Invoke all irq_cpu_offline functions.
1165 *
1166 *      Iterate through all irqs and invoke the chip.irq_cpu_offline()
1167 *      for each.
1168 */
1169void irq_cpu_offline(void)
1170{
1171        struct irq_desc *desc;
1172        struct irq_chip *chip;
1173        unsigned long flags;
1174        unsigned int irq;
1175
1176        for_each_active_irq(irq) {
1177                desc = irq_to_desc(irq);
1178                if (!desc)
1179                        continue;
1180
1181                raw_spin_lock_irqsave(&desc->lock, flags);
1182
1183                chip = irq_data_get_irq_chip(&desc->irq_data);
1184                if (chip && chip->irq_cpu_offline &&
1185                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1186                     !irqd_irq_disabled(&desc->irq_data)))
1187                        chip->irq_cpu_offline(&desc->irq_data);
1188
1189                raw_spin_unlock_irqrestore(&desc->lock, flags);
1190        }
1191}
1192
1193#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1194
1195#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1196/**
1197 *      handle_fasteoi_ack_irq - irq handler for edge hierarchy
1198 *      stacked on transparent controllers
1199 *
1200 *      @desc:  the interrupt description structure for this irq
1201 *
1202 *      Like handle_fasteoi_irq(), but for use with hierarchy where
1203 *      the irq_chip also needs to have its ->irq_ack() function
1204 *      called.
1205 */
1206void handle_fasteoi_ack_irq(struct irq_desc *desc)
1207{
1208        struct irq_chip *chip = desc->irq_data.chip;
1209
1210        raw_spin_lock(&desc->lock);
1211
1212        if (!irq_may_run(desc))
1213                goto out;
1214
1215        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1216
1217        /*
 1218         * If it's disabled or no action is available,
 1219         * then mask it and get out of here:
1220         */
1221        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1222                desc->istate |= IRQS_PENDING;
1223                mask_irq(desc);
1224                goto out;
1225        }
1226
1227        kstat_incr_irqs_this_cpu(desc);
1228        if (desc->istate & IRQS_ONESHOT)
1229                mask_irq(desc);
1230
1231        /* Start handling the irq */
1232        desc->irq_data.chip->irq_ack(&desc->irq_data);
1233
1234        preflow_handler(desc);
1235        handle_irq_event(desc);
1236
1237        cond_unmask_eoi_irq(desc, chip);
1238
1239        raw_spin_unlock(&desc->lock);
1240        return;
1241out:
1242        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1243                chip->irq_eoi(&desc->irq_data);
1244        raw_spin_unlock(&desc->lock);
1245}
1246EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1247
1248/**
1249 *      handle_fasteoi_mask_irq - irq handler for level hierarchy
1250 *      stacked on transparent controllers
1251 *
1252 *      @desc:  the interrupt description structure for this irq
1253 *
1254 *      Like handle_fasteoi_irq(), but for use with hierarchy where
1255 *      the irq_chip also needs to have its ->irq_mask_ack() function
1256 *      called.
1257 */
1258void handle_fasteoi_mask_irq(struct irq_desc *desc)
1259{
1260        struct irq_chip *chip = desc->irq_data.chip;
1261
1262        raw_spin_lock(&desc->lock);
1263        mask_ack_irq(desc);
1264
1265        if (!irq_may_run(desc))
1266                goto out;
1267
1268        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1269
1270        /*
 1271         * If it's disabled or no action is available,
 1272         * then mask it and get out of here:
1273         */
1274        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1275                desc->istate |= IRQS_PENDING;
1276                mask_irq(desc);
1277                goto out;
1278        }
1279
1280        kstat_incr_irqs_this_cpu(desc);
1281        if (desc->istate & IRQS_ONESHOT)
1282                mask_irq(desc);
1283
1284        preflow_handler(desc);
1285        handle_irq_event(desc);
1286
1287        cond_unmask_eoi_irq(desc, chip);
1288
1289        raw_spin_unlock(&desc->lock);
1290        return;
1291out:
1292        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1293                chip->irq_eoi(&desc->irq_data);
1294        raw_spin_unlock(&desc->lock);
1295}
1296EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1297
1298#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1299
1300/**
1301 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
1302 * NULL)
1303 * @data:       Pointer to interrupt specific data
1304 */
1305void irq_chip_enable_parent(struct irq_data *data)
1306{
1307        data = data->parent_data;
1308        if (data->chip->irq_enable)
1309                data->chip->irq_enable(data);
1310        else
1311                data->chip->irq_unmask(data);
1312}
1313EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1314
1315/**
1316 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
1317 * NULL)
1318 * @data:       Pointer to interrupt specific data
1319 */
1320void irq_chip_disable_parent(struct irq_data *data)
1321{
1322        data = data->parent_data;
1323        if (data->chip->irq_disable)
1324                data->chip->irq_disable(data);
1325        else
1326                data->chip->irq_mask(data);
1327}
1328EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1329
1330/**
1331 * irq_chip_ack_parent - Acknowledge the parent interrupt
1332 * @data:       Pointer to interrupt specific data
1333 */
1334void irq_chip_ack_parent(struct irq_data *data)
1335{
1336        data = data->parent_data;
1337        data->chip->irq_ack(data);
1338}
1339EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1340
1341/**
1342 * irq_chip_mask_parent - Mask the parent interrupt
1343 * @data:       Pointer to interrupt specific data
1344 */
1345void irq_chip_mask_parent(struct irq_data *data)
1346{
1347        data = data->parent_data;
1348        data->chip->irq_mask(data);
1349}
1350EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1351
1352/**
1353 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1354 * @data:       Pointer to interrupt specific data
1355 */
1356void irq_chip_mask_ack_parent(struct irq_data *data)
1357{
1358        data = data->parent_data;
1359        data->chip->irq_mask_ack(data);
1360}
1361EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1362
1363/**
1364 * irq_chip_unmask_parent - Unmask the parent interrupt
1365 * @data:       Pointer to interrupt specific data
1366 */
1367void irq_chip_unmask_parent(struct irq_data *data)
1368{
1369        data = data->parent_data;
1370        data->chip->irq_unmask(data);
1371}
1372EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1373
1374/**
1375 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1376 * @data:       Pointer to interrupt specific data
1377 */
1378void irq_chip_eoi_parent(struct irq_data *data)
1379{
1380        data = data->parent_data;
1381        data->chip->irq_eoi(data);
1382}
1383EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1384
1385/**
1386 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1387 * @data:       Pointer to interrupt specific data
1388 * @dest:       The affinity mask to set
1389 * @force:      Flag to enforce setting (disable online checks)
1390 *
 1391 * Conditional, as the underlying parent chip might not implement it.
1392 */
1393int irq_chip_set_affinity_parent(struct irq_data *data,
1394                                 const struct cpumask *dest, bool force)
1395{
1396        data = data->parent_data;
1397        if (data->chip->irq_set_affinity)
1398                return data->chip->irq_set_affinity(data, dest, force);
1399
1400        return -ENOSYS;
1401}
1402EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1403
1404/**
1405 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1406 * @data:       Pointer to interrupt specific data
1407 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1408 *
1409 * Conditional, as the underlying parent chip might not implement it.
1410 */
1411int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1412{
1413        data = data->parent_data;
1414
1415        if (data->chip->irq_set_type)
1416                return data->chip->irq_set_type(data, type);
1417
1418        return -ENOSYS;
1419}
1420EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1421
1422/**
1423 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1424 * @data:       Pointer to interrupt specific data
1425 *
1426 * Iterate through the domain hierarchy of the interrupt and check
1427 * whether a hw retrigger function exists. If yes, invoke it.
1428 */
1429int irq_chip_retrigger_hierarchy(struct irq_data *data)
1430{
1431        for (data = data->parent_data; data; data = data->parent_data)
1432                if (data->chip && data->chip->irq_retrigger)
1433                        return data->chip->irq_retrigger(data);
1434
1435        return 0;
1436}
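
/*
 * Illustrative sketch (not part of the original file): a child irq_chip in a
 * hierarchical domain with no mask/eoi hardware of its own can simply forward
 * those operations to its parent using the helpers above. The chip below is
 * hypothetical.
 *
 *	static struct irq_chip example_child_chip = {
 *		.name			= "example-child",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_type		= irq_chip_set_type_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *		.irq_retrigger		= irq_chip_retrigger_hierarchy,
 *	};
 */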
1437
1438/**
1439 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1440 * @data:       Pointer to interrupt specific data
1441 * @vcpu_info:  The vcpu affinity information
1442 */
1443int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1444{
1445        data = data->parent_data;
1446        if (data->chip->irq_set_vcpu_affinity)
1447                return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1448
1449        return -ENOSYS;
1450}
1451
1452/**
1453 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1454 * @data:       Pointer to interrupt specific data
1455 * @on:         Whether to set or reset the wake-up capability of this irq
1456 *
1457 * Conditional, as the underlying parent chip might not implement it.
1458 */
1459int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1460{
1461        data = data->parent_data;
1462
1463        if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1464                return 0;
1465
1466        if (data->chip->irq_set_wake)
1467                return data->chip->irq_set_wake(data, on);
1468
1469        return -ENOSYS;
1470}
1471EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1472
1473/**
1474 * irq_chip_request_resources_parent - Request resources on the parent interrupt
1475 * @data:       Pointer to interrupt specific data
1476 */
1477int irq_chip_request_resources_parent(struct irq_data *data)
1478{
1479        data = data->parent_data;
1480
1481        if (data->chip->irq_request_resources)
1482                return data->chip->irq_request_resources(data);
1483
1484        return -ENOSYS;
1485}
1486EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1487
1488/**
1489 * irq_chip_release_resources_parent - Release resources on the parent interrupt
1490 * @data:       Pointer to interrupt specific data
1491 */
1492void irq_chip_release_resources_parent(struct irq_data *data)
1493{
1494        data = data->parent_data;
1495        if (data->chip->irq_release_resources)
1496                data->chip->irq_release_resources(data);
1497}
1498EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1499#endif
1500
1501/**
 1502 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1503 * @data:       Pointer to interrupt specific data
1504 * @msg:        Pointer to the MSI message
1505 *
1506 * For hierarchical domains we find the first chip in the hierarchy
 1507 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 1508 * domains we use the top level chip.
1509 */
1510int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1511{
1512        struct irq_data *pos = NULL;
1513
1514#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
1515        for (; data; data = data->parent_data)
1516#endif
1517                if (data->chip && data->chip->irq_compose_msi_msg)
1518                        pos = data;
1519        if (!pos)
1520                return -ENOSYS;
1521
1522        pos->chip->irq_compose_msi_msg(pos, msg);
1523
1524        return 0;
1525}
1526
1527/**
1528 * irq_chip_pm_get - Enable power for an IRQ chip
1529 * @data:       Pointer to interrupt specific data
1530 *
1531 * Enable the power to the IRQ chip referenced by the interrupt data
1532 * structure.
1533 */
1534int irq_chip_pm_get(struct irq_data *data)
1535{
1536        int retval;
1537
1538        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
1539                retval = pm_runtime_get_sync(data->chip->parent_device);
1540                if (retval < 0) {
1541                        pm_runtime_put_noidle(data->chip->parent_device);
1542                        return retval;
1543                }
1544        }
1545
1546        return 0;
1547}
1548
1549/**
1550 * irq_chip_pm_put - Disable power for an IRQ chip
1551 * @data:       Pointer to interrupt specific data
1552 *
1553 * Disable the power to the IRQ chip referenced by the interrupt data
 1554 * structure. Note that power will only be disabled once this
1555 * function has been called for all IRQs that have called irq_chip_pm_get().
1556 */
1557int irq_chip_pm_put(struct irq_data *data)
1558{
1559        int retval = 0;
1560
1561        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
1562                retval = pm_runtime_put(data->chip->parent_device);
1563
1564        return (retval < 0) ? retval : 0;
1565}
1566