linux/kernel/irq/chip.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip based
 * architectures. Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
        WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
        return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if such a thing happens.
 */
struct irqaction chained_action = {
        .handler = bad_chained_irq,
};

/**
 *      irq_set_chip - set the irq chip for an irq
 *      @irq:   irq number
 *      @chip:  pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        if (!chip)
                chip = &no_irq_chip;

        desc->irq_data.chip = chip;
        irq_put_desc_unlock(desc, flags);
        /*
         * For !CONFIG_SPARSE_IRQ make the irq show up in
         * allocated_irqs.
         */
        irq_mark_irq(irq);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip);
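
/*
 * Example (illustrative sketch, not part of the original file): an
 * irqchip driver typically calls irq_set_chip() from its irq_domain
 * ->map() callback. "foo_chip" and its callbacks are hypothetical; a
 * real driver supplies hardware-specific mask/unmask operations.
 */
static void foo_mask(struct irq_data *d)   { /* mask line in hardware */ }
static void foo_unmask(struct irq_data *d) { /* unmask line in hardware */ }

static struct irq_chip foo_chip = {
        .name           = "foo",
        .irq_mask       = foo_mask,
        .irq_unmask     = foo_unmask,
};

static int foo_domain_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hw)
{
        irq_set_chip(virq, &foo_chip);
        irq_set_handler(virq, handle_level_irq);
        return 0;
}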

/**
 *      irq_set_irq_type - set the irq trigger type for an irq
 *      @irq:   irq number
 *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
        int ret = 0;

        if (!desc)
                return -EINVAL;

        ret = __irq_set_trigger(desc, type);
        irq_put_desc_busunlock(desc, flags);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);
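
/*
 * Example (illustrative sketch): a driver can program the trigger
 * explicitly instead of (or before) passing an IRQF_TRIGGER_* flag to
 * request_irq(). IRQ_TYPE_EDGE_RISING is one of the standard types from
 * include/linux/irq.h; "foo_setup_line" is a hypothetical helper.
 */
static int foo_setup_line(unsigned int irq)
{
        /* Routed to the chip's ->irq_set_type() under the bus lock */
        return irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
}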

/**
 *      irq_set_handler_data - set irq handler data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to interrupt specific data
 *
 *      Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_common_data.handler_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);
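
/*
 * Example (illustrative sketch): handler data set here is what a flow
 * handler later retrieves with irq_desc_get_handler_data(). "struct
 * foo_mux" and foo_mux_dispatch() are hypothetical driver state/helpers.
 */
static void foo_flow_handler(struct irq_desc *desc)
{
        /* Pointer stashed earlier via irq_set_handler_data(irq, mux) */
        struct foo_mux *mux = irq_desc_get_handler_data(desc);

        foo_mux_dispatch(mux);
}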

/**
 *      irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 *      @irq_base:      Interrupt number base
 *      @irq_offset:    Interrupt number offset
 *      @entry:         Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq at the given offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
                         struct msi_desc *entry)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

        if (!desc)
                return -EINVAL;
        desc->irq_common_data.msi_desc = entry;
        if (entry && !irq_offset)
                entry->irq = irq_base;
        irq_put_desc_unlock(desc, flags);
        return 0;
}

/**
 *      irq_set_msi_desc - set MSI descriptor data for an irq
 *      @irq:   Interrupt number
 *      @entry: Pointer to MSI descriptor data
 *
 *      Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
        return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 *      irq_set_chip_data - set irq chip data for an irq
 *      @irq:   Interrupt number
 *      @data:  Pointer to chip specific data
 *
 *      Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;
        desc->irq_data.chip_data = data;
        irq_put_desc_unlock(desc, flags);
        return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);
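
/*
 * Example (illustrative sketch): chip callbacks retrieve the pointer set
 * by irq_set_chip_data() via irq_data_get_irq_chip_data(). "struct
 * foo_regs" and FOO_MASK_SET are hypothetical; assumes <linux/io.h> for
 * writel().
 */
static void foo_chip_mask(struct irq_data *d)
{
        struct foo_regs *regs = irq_data_get_irq_chip_data(d);

        /* Set the mask bit for this hardware irq line */
        writel(BIT(irqd_to_hwirq(d)), regs->base + FOO_MASK_SET);
}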

struct irq_data *irq_get_irq_data(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
        irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
        irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
        IRQ_STARTUP_NORMAL,
        IRQ_STARTUP_MANAGED,
        IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);

        if (!irqd_affinity_is_managed(d))
                return IRQ_STARTUP_NORMAL;

        irqd_clr_managed_shutdown(d);

        if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
                /*
                 * Catch code which fiddles with enable_irq() on a managed
                 * and potentially shut-down IRQ. Chained interrupt
                 * installment or irq auto probing should not happen on
                 * managed irqs either.
                 */
                if (WARN_ON_ONCE(force))
                        return IRQ_STARTUP_ABORT;
                /*
                 * The interrupt was requested, but there is no online CPU
                 * in its affinity mask. Put it into managed shutdown
                 * state and let the cpu hotplug mechanism start it up once
                 * a CPU in the mask becomes available.
                 */
                return IRQ_STARTUP_ABORT;
        }
        /*
         * Managed interrupts have reserved resources, so this should not
         * happen.
         */
        if (WARN_ON(irq_domain_activate_irq(d, false)))
                return IRQ_STARTUP_ABORT;
        return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
        return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        int ret = 0;

        /* Warn if this interrupt is not activated but try nevertheless */
        WARN_ON_ONCE(!irqd_is_activated(d));

        if (d->chip->irq_startup) {
                ret = d->chip->irq_startup(d);
                irq_state_clr_disabled(desc);
                irq_state_clr_masked(desc);
        } else {
                irq_enable(desc);
        }
        irq_state_set_started(desc);
        return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        struct cpumask *aff = irq_data_get_affinity_mask(d);
        int ret = 0;

        desc->depth = 0;

        if (irqd_is_started(d)) {
                irq_enable(desc);
        } else {
                switch (__irq_startup_managed(desc, aff, force)) {
                case IRQ_STARTUP_NORMAL:
                        ret = __irq_startup(desc);
                        irq_setup_affinity(desc);
                        break;
                case IRQ_STARTUP_MANAGED:
                        irq_do_set_affinity(d, aff, false);
                        ret = __irq_startup(desc);
                        break;
                case IRQ_STARTUP_ABORT:
                        irqd_set_managed_shutdown(d);
                        return 0;
                }
        }
        if (resend)
                check_irq_resend(desc);

        return ret;
}

int irq_activate(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);

        if (!irqd_affinity_is_managed(d))
                return irq_domain_activate_irq(d, false);
        return 0;
}

int irq_activate_and_startup(struct irq_desc *desc, bool resend)
{
        if (WARN_ON(irq_activate(desc)))
                return 0;
        return irq_startup(desc, resend, IRQ_START_FORCE);
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
        if (irqd_is_started(&desc->irq_data)) {
                desc->depth = 1;
                if (desc->irq_data.chip->irq_shutdown) {
                        desc->irq_data.chip->irq_shutdown(&desc->irq_data);
                        irq_state_set_disabled(desc);
                        irq_state_set_masked(desc);
                } else {
                        __irq_disable(desc, true);
                }
                irq_state_clr_started(desc);
        }
        /*
         * This must be called even if the interrupt was never started up,
         * because the activation can happen before the interrupt is
         * available for request/startup. It has its own state tracking so
         * it's safe to call it unconditionally.
         */
        irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
        if (!irqd_irq_disabled(&desc->irq_data)) {
                unmask_irq(desc);
        } else {
                irq_state_clr_disabled(desc);
                if (desc->irq_data.chip->irq_enable) {
                        desc->irq_data.chip->irq_enable(&desc->irq_data);
                        irq_state_clr_masked(desc);
                } else {
                        unmask_irq(desc);
                }
        }
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
        if (irqd_irq_disabled(&desc->irq_data)) {
                if (mask)
                        mask_irq(desc);
        } else {
                irq_state_set_disabled(desc);
                if (desc->irq_data.chip->irq_disable) {
                        desc->irq_data.chip->irq_disable(&desc->irq_data);
                        irq_state_set_masked(desc);
                } else if (mask) {
                        mask_irq(desc);
                }
        }
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:       irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
        __irq_disable(desc, irq_settings_disable_unlazy(desc));
}
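
/*
 * Example (illustrative sketch): opting out of the lazy disable
 * behaviour documented above, for a device that cannot silence the
 * interrupt at the source. irq_set_status_flags() and
 * IRQ_DISABLE_UNLAZY are the real kernel interfaces; "foo" is a
 * hypothetical driver.
 */
static void foo_init_irq_quirk(unsigned int irq)
{
        /* disable_irq[_nosync]() will now mask at the chip immediately */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
}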

void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_enable)
                desc->irq_data.chip->irq_enable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
        if (desc->irq_data.chip->irq_disable)
                desc->irq_data.chip->irq_disable(&desc->irq_data);
        else
                desc->irq_data.chip->irq_mask(&desc->irq_data);
        cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
        if (desc->irq_data.chip->irq_mask_ack) {
                desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
                irq_state_set_masked(desc);
        } else {
                mask_irq(desc);
                if (desc->irq_data.chip->irq_ack)
                        desc->irq_data.chip->irq_ack(&desc->irq_data);
        }
}

void mask_irq(struct irq_desc *desc)
{
        if (irqd_irq_masked(&desc->irq_data))
                return;

        if (desc->irq_data.chip->irq_mask) {
                desc->irq_data.chip->irq_mask(&desc->irq_data);
                irq_state_set_masked(desc);
        }
}

void unmask_irq(struct irq_desc *desc)
{
        if (!irqd_irq_masked(&desc->irq_data))
                return;

        if (desc->irq_data.chip->irq_unmask) {
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
                irq_state_clr_masked(desc);
        }
}

void unmask_threaded_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        if (chip->flags & IRQCHIP_EOI_THREADED)
                chip->irq_eoi(&desc->irq_data);

        unmask_irq(desc);
}

/*
 *      handle_nested_irq - Handle a nested irq from an irq thread
 *      @irq:   the interrupt number
 *
 *      Handle interrupts which are nested into a threaded interrupt
 *      handler. The handler function is called inside the calling
 *      thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        irqreturn_t action_ret;

        might_sleep();

        raw_spin_lock_irq(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        action = desc->action;
        if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock_irq(&desc->lock);

        action_ret = IRQ_NONE;
        for_each_action_of_desc(desc, action)
                action_ret |= action->thread_fn(action->irq, action->dev_id);

        if (!noirqdebug)
                note_interrupt(desc, action_ret);

        raw_spin_lock_irq(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
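
/*
 * Example (illustrative sketch): the usual consumer of
 * handle_nested_irq() is the threaded, sleepable demux handler of an
 * I2C GPIO expander. "struct foo_expander" and foo_read_status() are
 * hypothetical; irq_find_mapping() and for_each_set_bit() are real.
 * The child interrupts would have been marked with
 * irq_set_nested_thread(virq, 1) when mapped.
 */
static irqreturn_t foo_expander_thread_fn(int irq, void *dev_id)
{
        struct foo_expander *chip = dev_id;
        unsigned long status = foo_read_status(chip);   /* sleeps on I2C */
        int bit;

        for_each_set_bit(bit, &status, chip->nirqs)
                handle_nested_irq(irq_find_mapping(chip->domain, bit));

        return IRQ_HANDLED;
}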

static bool irq_check_poll(struct irq_desc *desc)
{
        if (!(desc->istate & IRQS_POLL_INPROGRESS))
                return false;
        return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
        unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

        /*
         * If the interrupt is not in progress and is not an armed
         * wakeup interrupt, proceed.
         */
        if (!irqd_has_set(&desc->irq_data, mask))
                return true;

        /*
         * If the interrupt is an armed wakeup source, mark it pending
         * and suspended, disable it and notify the pm core about the
         * event.
         */
        if (irq_pm_check_wakeup(desc))
                return false;

        /*
         * Handle a potential concurrent poll on a different core.
         */
        return irq_check_poll(desc);
}

/**
 *      handle_simple_irq - Simple and software-decoded IRQs.
 *      @desc:  the interrupt description structure for this irq
 *
 *      Simple interrupts are either sent from a demultiplexing interrupt
 *      handler or come from hardware, where no interrupt hardware control
 *      is necessary.
 *
 *      Note: The caller is expected to handle the ack, clear, mask and
 *      unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
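
/*
 * Example (illustrative sketch): children of a pure software
 * demultiplexer need no hardware control, so they can be wired up with
 * handle_simple_irq() and the kernel's dummy_irq_chip; the parent's
 * handler then fires them with generic_handle_irq(). "foo_child_map"
 * is a hypothetical irq_domain ->map() callback.
 */
static int foo_child_map(struct irq_domain *d, unsigned int virq,
                         irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}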

/**
 *      handle_untracked_irq - Simple and software-decoded IRQs.
 *      @desc:  the interrupt description structure for this irq
 *
 *      Untracked interrupts are sent from a demultiplexing interrupt
 *      handler when the demultiplexer does not know which device in its
 *      multiplexed irq domain generated the interrupt. IRQs handled
 *      through here are not subjected to stats tracking, randomness, or
 *      spurious interrupt detection.
 *
 *      Note: Like handle_simple_irq, the caller is expected to handle
 *      the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
        unsigned int flags = 0;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        desc->istate &= ~IRQS_PENDING;
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock(&desc->lock);

        __handle_irq_event_percpu(desc, &flags);

        raw_spin_lock(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
        /*
         * We need to unmask in the following cases:
         * - Standard level irq (IRQF_ONESHOT is not set)
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
                unmask_irq(desc);
}

/**
 *      handle_level_irq - Level type irq handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Level type interrupts are active as long as the hardware line has
 *      the active level. This may require masking the interrupt and
 *      unmasking it after the associated handler has acknowledged the
 *      device, so that the interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (!irq_may_run(desc))
                goto out_unlock;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * keep it masked and get out of here.
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);
        handle_irq_event(desc);

        cond_unmask_irq(desc);

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);
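
/*
 * Example (illustrative sketch): handle_level_irq() relies on the chip
 * being able to mask the line (->irq_mask_ack(), or ->irq_mask() plus
 * ->irq_ack(), and ->irq_unmask()), since the flow masks on entry and
 * conditionally unmasks on exit. A hypothetical driver wires it up as:
 *
 *      irq_set_chip_and_handler(virq, &foo_level_chip, handle_level_irq);
 */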

#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
        if (desc->preflow_handler)
                desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
        if (!(desc->istate & IRQS_ONESHOT)) {
                chip->irq_eoi(&desc->irq_data);
                return;
        }
        /*
         * We need to unmask in the following cases:
         * - Oneshot irq which did not wake the thread (caused by a
         *   spurious interrupt or a primary handler handling it
         *   completely).
         */
        if (!irqd_irq_disabled(&desc->irq_data) &&
            irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
                chip->irq_eoi(&desc->irq_data);
                unmask_irq(desc);
        } else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
                chip->irq_eoi(&desc->irq_data);
        }
}

/**
 *      handle_fasteoi_irq - irq handler for transparent controllers
 *      @desc:  the interrupt description structure for this irq
 *
 *      Only a single callback will be issued to the chip: an ->eoi()
 *      call when the interrupt has been serviced. This enables support
 *      for modern forms of interrupt handlers, which handle the flow
 *      details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 *      handle_edge_irq - edge type IRQ handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Interrupts occur on the falling and/or rising edge of a hardware
 *      signal. The occurrence is latched into the irq controller hardware
 *      and must be acked in order to be re-enabled. After the ack another
 *      interrupt can happen on the same source even before the first one
 *      is handled by the associated event handler. If this happens it
 *      might be necessary to disable (mask) the interrupt depending on the
 *      controller hardware. This requires re-enabling the interrupt inside
 *      the loop which handles the interrupts which have arrived while
 *      the handler was running. If all pending interrupts are handled, the
 *      loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        /*
         * If it's disabled or no action is available, then mask it and
         * get out of here.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                mask_ack_irq(desc);
                goto out_unlock;
        }

        kstat_incr_irqs_this_cpu(desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        do {
                if (unlikely(!desc->action)) {
                        mask_irq(desc);
                        goto out_unlock;
                }

                /*
                 * When another irq arrived while we were handling
                 * one, we could have masked the irq.
                 * Re-enable it, if it was not disabled in the meantime.
                 */
                if (unlikely(desc->istate & IRQS_PENDING)) {
                        if (!irqd_irq_disabled(&desc->irq_data) &&
                            irqd_irq_masked(&desc->irq_data))
                                unmask_irq(desc);
                }

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);
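
/*
 * Example (illustrative sketch): handle_edge_irq() calls ->irq_ack()
 * unconditionally, so a chip used with it must provide one; mask/unmask
 * are needed for the pending/replay handling. All "foo_edge_*"
 * callbacks are hypothetical stubs.
 */
static void foo_edge_ack(struct irq_data *d)    { /* clear hw latch */ }
static void foo_edge_mask(struct irq_data *d)   { /* mask in hw */ }
static void foo_edge_unmask(struct irq_data *d) { /* unmask in hw */ }

static struct irq_chip foo_edge_chip = {
        .name           = "foo-edge",
        .irq_ack        = foo_edge_ack,
        .irq_mask       = foo_edge_mask,
        .irq_unmask     = foo_edge_unmask,
};

/* Wired up with: irq_set_chip_and_handler(virq, &foo_edge_chip, handle_edge_irq); */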

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 *      handle_edge_eoi_irq - edge eoi type IRQ handler
 *      @desc:  the interrupt description structure for this irq
 *
 * Similar to handle_edge_irq() above, but using eoi and without the
 * mask/unmask logic.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        raw_spin_lock(&desc->lock);

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        if (!irq_may_run(desc)) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        /*
         * If it's disabled or no action is available, then mark it
         * pending and get out of here; the eoi is issued on the way out.
         */
        if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
                desc->istate |= IRQS_PENDING;
                goto out_eoi;
        }

        kstat_incr_irqs_this_cpu(desc);

        do {
                if (unlikely(!desc->action))
                        goto out_eoi;

                handle_irq_event(desc);

        } while ((desc->istate & IRQS_PENDING) &&
                 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
        chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
#endif

/**
 *      handle_percpu_irq - Per CPU local irq handler
 *      @desc:  the interrupt description structure for this irq
 *
 *      Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        /*
         * PER CPU interrupts are not serialized. Do not touch
         * desc->tot_count.
         */
        __kstat_incr_irqs_this_cpu(desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        handle_irq_event_percpu(desc);

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:       the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irqaction *action = desc->action;
        unsigned int irq = irq_desc_get_irq(desc);
        irqreturn_t res;

        /*
         * PER CPU interrupts are not serialized. Do not touch
         * desc->tot_count.
         */
        __kstat_incr_irqs_this_cpu(desc);

        if (chip->irq_ack)
                chip->irq_ack(&desc->irq_data);

        if (likely(action)) {
                trace_irq_handler_entry(irq, action);
                res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
                trace_irq_handler_exit(irq, action, res);
        } else {
                unsigned int cpu = smp_processor_id();
                bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

                if (enabled)
                        irq_percpu_disable(desc, cpu);

                pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
                            enabled ? " and unmasked" : "", irq, cpu);
        }

        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}
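
/*
 * Example (illustrative sketch) of the per-CPU dev_id contract described
 * above: request_percpu_irq() stores a percpu pointer in
 * action->percpu_dev_id, and enable_percpu_irq() must then be called on
 * each CPU that should receive the interrupt. "struct foo_pcpu" and
 * foo_handle_tick() are hypothetical; assumes <linux/percpu.h> and
 * <linux/interrupt.h>.
 */
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_state);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
        struct foo_pcpu *p = dev_id;    /* this CPU's instance */

        return foo_handle_tick(p) ? IRQ_HANDLED : IRQ_NONE;
}

static int foo_percpu_setup(unsigned int irq)
{
        int err = request_percpu_irq(irq, foo_percpu_handler, "foo-timer",
                                     &foo_pcpu_state);
        if (err)
                return err;

        /* Typically run on each CPU from a hotplug online callback */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        return 0;
}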

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
                     int is_chained, const char *name)
{
        if (!handle) {
                handle = handle_bad_irq;
        } else {
                struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
                /*
                 * With hierarchical domains we might run into a
                 * situation where the outermost chip is not yet set
                 * up, but the inner chips are there.  Instead of
                 * bailing we install the handler, but obviously we
                 * cannot enable/startup the interrupt at this point.
                 */
                while (irq_data) {
                        if (irq_data->chip != &no_irq_chip)
                                break;
                        /*
                         * Bail out if the outer chip is not set up
                         * and the interrupt is supposed to be started
                         * right away.
                         */
                        if (WARN_ON(is_chained))
                                return;
                        /* Try the parent */
                        irq_data = irq_data->parent_data;
                }
#endif
                if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
                        return;
        }

        /* Uninstall? */
        if (handle == handle_bad_irq) {
                if (desc->irq_data.chip != &no_irq_chip)
                        mask_ack_irq(desc);
                irq_state_set_disabled(desc);
                if (is_chained)
                        desc->action = NULL;
                desc->depth = 1;
        }
        desc->handle_irq = handle;
        desc->name = name;

        if (handle != handle_bad_irq && is_chained) {
                unsigned int type = irqd_get_trigger_type(&desc->irq_data);

                /*
                 * We're about to start this interrupt immediately,
                 * hence the need to set the trigger configuration.
                 * But the .set_type callback may have overridden the
                 * flow handler, ignoring that we're dealing with a
                 * chained interrupt. Reset it immediately because we
                 * do know better.
                 */
                if (type != IRQ_TYPE_NONE) {
                        __irq_set_trigger(desc, type);
                        desc->handle_irq = handle;
                }

                irq_settings_set_noprobe(desc);
                irq_settings_set_norequest(desc);
                irq_settings_set_nothread(desc);
                desc->action = &chained_action;
                irq_activate_and_startup(desc, IRQ_RESEND);
        }
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
                  const char *name)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        __irq_do_set_handler(desc, handle, is_chained, name);
        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
                                 void *data)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return;

        desc->irq_common_data.handler_data = data;
        __irq_do_set_handler(desc, handle, 1, NULL);

        irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
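
/*
 * Example (illustrative sketch): a chained demultiplexer is the main
 * user of irq_set_chained_handler_and_data(). The chained_irq_enter()/
 * chained_irq_exit() helpers come from <linux/irqchip/chained_irq.h>;
 * "struct foo_mux", its register layout and FOO_PENDING are
 * hypothetical.
 */
static void foo_mux_flow_handler(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct foo_mux *mux = irq_desc_get_handler_data(desc);
        unsigned long pending;
        int hwirq;

        chained_irq_enter(chip, desc);

        pending = readl(mux->base + FOO_PENDING);
        for_each_set_bit(hwirq, &pending, 32)
                generic_handle_irq(irq_find_mapping(mux->domain, hwirq));

        chained_irq_exit(chip, desc);
}

/* Installed with: irq_set_chained_handler_and_data(parent_irq, foo_mux_flow_handler, mux); */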

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
                              irq_flow_handler_t handle, const char *name)
{
        irq_set_chip(irq, chip);
        __irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
        unsigned long flags, trigger, tmp;
        struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

        if (!desc)
                return;

        /*
         * Warn when a driver sets the no autoenable flag on an already
         * active interrupt.
         */
        WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

        irq_settings_clr_and_set(desc, clr, set);

        trigger = irqd_get_trigger_type(&desc->irq_data);

        irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
                   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
        if (irq_settings_has_no_balance_set(desc))
                irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
        if (irq_settings_is_per_cpu(desc))
                irqd_set(&desc->irq_data, IRQD_PER_CPU);
        if (irq_settings_can_move_pcntxt(desc))
                irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
        if (irq_settings_is_level(desc))
                irqd_set(&desc->irq_data, IRQD_LEVEL);

        tmp = irq_settings_get_trigger_mask(desc);
        if (tmp != IRQ_TYPE_NONE)
                trigger = tmp;

        irqd_set(&desc->irq_data, trigger);

        irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);
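
/*
 * Example (illustrative sketch): a typical call clears and sets IRQ_*
 * status flags in one shot, e.g. fencing off a line that the core
 * manages internally. The flag combination below is one plausible
 * example, not a recommendation; "foo_reserve_line" is hypothetical.
 */
static void foo_reserve_line(unsigned int irq)
{
        /* clr = IRQ_NOAUTOEN, set = no probing/requesting/threading */
        irq_modify_status(irq, IRQ_NOAUTOEN,
                          IRQ_NOPROBE | IRQ_NOREQUEST | IRQ_NOTHREAD);
}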

/**
 *      irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 *      Iterate through all irqs and invoke the chip.irq_cpu_online()
 *      for each.
 */
void irq_cpu_online(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_online &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_online(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}

/**
 *      irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 *      Iterate through all irqs and invoke the chip.irq_cpu_offline()
 *      for each.
 */
void irq_cpu_offline(void)
{
        struct irq_desc *desc;
        struct irq_chip *chip;
        unsigned long flags;
        unsigned int irq;

        for_each_active_irq(irq) {
                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                chip = irq_data_get_irq_chip(&desc->irq_data);
                if (chip && chip->irq_cpu_offline &&
                    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
                     !irqd_irq_disabled(&desc->irq_data)))
                        chip->irq_cpu_offline(&desc->irq_data);

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }
}

#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY

#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
/**
 *      handle_fasteoi_ack_irq - irq handler for edge hierarchy
 *      stacked on transparent controllers
 *
 *      @desc:  the interrupt description structure for this irq
 *
 *      Like handle_fasteoi_irq(), but for use with hierarchy where
 *      the irq_chip also needs to have its ->irq_ack() function
 *      called.
 */
void handle_fasteoi_ack_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        /* Start handling the irq */
        desc->irq_data.chip->irq_ack(&desc->irq_data);

        preflow_handler(desc);
        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);

/**
 *      handle_fasteoi_mask_irq - irq handler for level hierarchy
 *      stacked on transparent controllers
 *
 *      @desc:  the interrupt description structure for this irq
 *
 *      Like handle_fasteoi_irq(), but for use with hierarchy where
 *      the irq_chip also needs to have its ->irq_mask_ack() function
 *      called.
 */
void handle_fasteoi_mask_irq(struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;

        raw_spin_lock(&desc->lock);
        mask_ack_irq(desc);

        if (!irq_may_run(desc))
                goto out;

        desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

        /*
         * If it's disabled or no action is available,
         * then mask it and get out of here:
         */
        if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
                desc->istate |= IRQS_PENDING;
                mask_irq(desc);
                goto out;
        }

        kstat_incr_irqs_this_cpu(desc);
        if (desc->istate & IRQS_ONESHOT)
                mask_irq(desc);

        preflow_handler(desc);
        handle_irq_event(desc);

        cond_unmask_eoi_irq(desc, chip);

        raw_spin_unlock(&desc->lock);
        return;
out:
        if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
                chip->irq_eoi(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);

#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */

/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
        data = data->parent_data;
        if (data->chip->irq_enable)
                data->chip->irq_enable(data);
        else
                data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_enable_parent);

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
        data = data->parent_data;
        if (data->chip->irq_disable)
                data->chip->irq_disable(data);
        else
                data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_disable_parent);

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:       Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
        data = data->parent_data;
        data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @dest:       The affinity mask to set
 * @force:      Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
                                 const struct cpumask *dest, bool force)
{
        data = data->parent_data;
        if (data->chip->irq_set_affinity)
                return data->chip->irq_set_affinity(data, dest, force);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
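
/*
 * Example (illustrative sketch): a stacked (hierarchical) irqchip
 * commonly delegates operations to its parent by plugging the
 * irq_chip_*_parent() helpers straight into its irq_chip, a pattern
 * typical of MSI chips stacked on top of e.g. a GIC. "foo-msi" is a
 * hypothetical name.
 */
static struct irq_chip foo_msi_chip = {
        .name                   = "foo-msi",
        .irq_mask               = irq_chip_mask_parent,
        .irq_unmask             = irq_chip_unmask_parent,
        .irq_eoi                = irq_chip_eoi_parent,
        .irq_set_affinity       = irq_chip_set_affinity_parent,
};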

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @type:       IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
        data = data->parent_data;

        if (data->chip->irq_set_type)
                return data->chip->irq_set_type(data, type);

        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:       Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
        for (data = data->parent_data; data; data = data->parent_data)
                if (data->chip && data->chip->irq_retrigger)
                        return data->chip->irq_retrigger(data);

        return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @vcpu_info:  The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
        data = data->parent_data;
        if (data->chip->irq_set_vcpu_affinity)
                return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

        return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:       Pointer to interrupt specific data
 * @on:         Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
        data = data->parent_data;
        if (data->chip->irq_set_wake)
                return data->chip->irq_set_wake(data, on);

        return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
 * @data:       Pointer to interrupt specific data
 * @msg:        Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non-hierarchical
 * domains we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
        struct irq_data *pos = NULL;

#ifdef  CONFIG_IRQ_DOMAIN_HIERARCHY
        for (; data; data = data->parent_data)
#endif
                if (data->chip && data->chip->irq_compose_msi_msg)
                        pos = data;
        if (!pos)
                return -ENOSYS;

        pos->chip->irq_compose_msi_msg(pos, msg);

        return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:       Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
        int retval;

        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
                retval = pm_runtime_get_sync(data->chip->parent_device);
                if (retval < 0) {
                        pm_runtime_put_noidle(data->chip->parent_device);
                        return retval;
                }
        }

        return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:       Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this function
 * has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
        int retval = 0;

        if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
                retval = pm_runtime_put(data->chip->parent_device);

        return (retval < 0) ? retval : 0;
}