linux/kernel/irq/handle.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */

#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <trace/events/irq.h>

#include "internals.h"

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#endif

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
        unsigned int irq = irq_desc_get_irq(desc);

        print_irq_desc(irq, desc);
        kstat_incr_irqs_this_cpu(desc);
        ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);
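
/*
 * Usage sketch (illustrative, not part of this file): besides being the
 * default handler for interrupt lines without a real flow handler installed,
 * handle_bad_irq() can be installed explicitly for lines which must not fire
 * yet, e.g. by an irqchip or gpio driver before the trigger type is known.
 * The irq number below is hypothetical:
 *
 *      irq_set_handler(irq, handle_bad_irq);
 *
 * and later, once the line is properly configured:
 *
 *      irq_set_handler(irq, handle_level_irq);
 */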

/*
 * Special, empty irq handler:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
        return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);
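
/*
 * Usage sketch (illustrative, not from this file): no_action() is useful
 * when an interrupt line has to be claimed but needs no device handling,
 * such as the cascade input of a secondary interrupt controller. The irq
 * number and name below are hypothetical:
 *
 *      if (request_irq(cascade_irq, no_action, IRQF_NO_THREAD,
 *                      "cascade", NULL))
 *              pr_err("Unable to request cascade IRQ\n");
 */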

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
        if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
                return;

        printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
               "but no thread function available.", irq, action->name);
}

void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
        /*
         * In case the thread crashed and was killed we just pretend that
         * we handled the interrupt. The hardirq handler has disabled the
         * device interrupt, so no irq storm is lurking.
         */
        if (action->thread->flags & PF_EXITING)
                return;

        /*
         * Wake up the handler thread for this action. If the
         * RUNTHREAD bit is already set, nothing to do.
         */
        if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
                return;

        /*
         * It's safe to OR the mask lockless here. We have only two
         * places which write to threads_oneshot: This code and the
         * irq thread.
         *
         * This code is the hard irq context and can never run on two
         * cpus in parallel. If it ever does we have more serious
         * problems than this bitmask.
         *
         * The irq threads of this irq which clear their "running" bit
         * in threads_oneshot are serialized via desc->lock against
         * each other and they are serialized against this code by
         * IRQS_INPROGRESS.
         *
         * Hard irq handler:
         *
         *      spin_lock(desc->lock);
         *      desc->state |= IRQS_INPROGRESS;
         *      spin_unlock(desc->lock);
         *      set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
         *      desc->threads_oneshot |= mask;
         *      spin_lock(desc->lock);
         *      desc->state &= ~IRQS_INPROGRESS;
         *      spin_unlock(desc->lock);
         *
         * irq thread:
         *
         * again:
         *      spin_lock(desc->lock);
         *      if (desc->state & IRQS_INPROGRESS) {
         *              spin_unlock(desc->lock);
         *              while(desc->state & IRQS_INPROGRESS)
         *                      cpu_relax();
         *              goto again;
         *      }
         *      if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
         *              desc->threads_oneshot &= ~mask;
         *      spin_unlock(desc->lock);
         *
         * So either the thread waits for us to clear IRQS_INPROGRESS
         * or we are waiting in the flow handler for desc->lock to be
         * released before we reach this point. The thread also checks
         * IRQTF_RUNTHREAD under desc->lock. If set it leaves
         * threads_oneshot untouched and runs the thread another time.
         */
        desc->threads_oneshot |= action->thread_mask;

        /*
         * We increment the threads_active counter in case we wake up
         * the irq thread. The irq thread decrements the counter when
         * it returns from the handler or in the exit path and wakes
         * up waiters which are stuck in synchronize_irq() when the
         * active count becomes zero. synchronize_irq() is serialized
         * against this code (hard irq handler) via IRQS_INPROGRESS
         * like the finalize_oneshot() code. See comment above.
         */
        atomic_inc(&desc->threads_active);

        wake_up_process(action->thread);
}
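
/*
 * Usage sketch (illustrative, not part of the genirq core): the usual way to
 * reach __irq_wake_thread() is a primary handler returning IRQ_WAKE_THREAD
 * after a threaded handler was registered via request_threaded_irq(); the
 * thread function then runs in process context and may sleep. All my_* names
 * below are hypothetical:
 *
 *      static irqreturn_t my_primary_handler(int irq, void *dev_id)
 *      {
 *              struct my_dev *md = dev_id;
 *
 *              if (!my_dev_irq_pending(md))
 *                      return IRQ_NONE;
 *
 *              my_dev_mask_irq(md);
 *              return IRQ_WAKE_THREAD;
 *      }
 *
 *      static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *      {
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_threaded_irq(irq, my_primary_handler, my_thread_fn,
 *                                 IRQF_ONESHOT, "my_dev", md);
 */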

irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
{
        irqreturn_t retval = IRQ_NONE;
        unsigned int irq = desc->irq_data.irq;
        struct irqaction *action;

        record_irq_time(desc);

        for_each_action_of_desc(desc, action) {
                irqreturn_t res;

                /*
                 * If this IRQ would be threaded under force_irqthreads, mark it so.
                 */
                if (irq_settings_can_thread(desc) &&
                    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
                        lockdep_hardirq_threaded();

                trace_irq_handler_entry(irq, action);
                res = action->handler(irq, action->dev_id);
                trace_irq_handler_exit(irq, action, res);

                if (WARN_ONCE(!irqs_disabled(),"irq %u handler %pS enabled interrupts\n",
                              irq, action->handler))
                        local_irq_disable();

                switch (res) {
                case IRQ_WAKE_THREAD:
                        /*
                         * Catch drivers which return WAKE_THREAD but
                         * did not set up a thread function
                         */
                        if (unlikely(!action->thread_fn)) {
                                warn_no_thread(irq, action);
                                break;
                        }

                        __irq_wake_thread(desc, action);

                        fallthrough;    /* to add to randomness */
                case IRQ_HANDLED:
                        *flags |= action->flags;
                        break;

                default:
                        break;
                }

                retval |= res;
        }

        return retval;
}
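
/*
 * Usage sketch (illustrative): because the loop above runs every action on a
 * shared line and ORs the results, a handler registered with IRQF_SHARED has
 * to return IRQ_NONE when its device did not raise the interrupt, so that
 * note_interrupt() below can spot spurious interrupts. The my_* names are
 * hypothetical:
 *
 *      static irqreturn_t my_shared_handler(int irq, void *dev_id)
 *      {
 *              struct my_dev *md = dev_id;
 *
 *              if (!my_dev_irq_pending(md))
 *                      return IRQ_NONE;
 *
 *              my_dev_ack_irq(md);
 *              return IRQ_HANDLED;
 *      }
 *
 *      ret = request_irq(irq, my_shared_handler, IRQF_SHARED, "my_dev", md);
 */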

irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
        irqreturn_t retval;
        unsigned int flags = 0;

        retval = __handle_irq_event_percpu(desc, &flags);

        add_interrupt_randomness(desc->irq_data.irq, flags);

        if (!irq_settings_no_debug(desc))
                note_interrupt(desc, retval);
        return retval;
}

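/**
 * handle_irq_event - handle the action(s) of an interrupt descriptor
 * @desc:      the interrupt description structure for this irq
 *
 * Called by the irq flow handlers with desc->lock held and interrupts
 * disabled. Marks the descriptor IRQD_IRQ_INPROGRESS, drops desc->lock
 * around the invocation of the device handlers and reacquires it before
 * returning, so the handlers themselves run without the descriptor lock.
 */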
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
        irqreturn_t ret;

        desc->istate &= ~IRQS_PENDING;
        irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        raw_spin_unlock(&desc->lock);

        ret = handle_irq_event_percpu(desc);

        raw_spin_lock(&desc->lock);
        irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
        return ret;
}

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
        if (handle_arch_irq)
                return -EBUSY;

        handle_arch_irq = handle_irq;
        return 0;
}
#endif
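
/*
 * Usage sketch (illustrative, not from this file): on architectures that
 * select CONFIG_GENERIC_IRQ_MULTI_HANDLER, the root interrupt controller
 * driver registers its low-level entry point once during early boot. The
 * my_irqchip_* names below are hypothetical:
 *
 *      static void my_irqchip_handle_irq(struct pt_regs *regs)
 *      {
 *              // read the hardware irq number from the controller and
 *              // dispatch it through the irq domain
 *      }
 *
 *      static int __init my_irqchip_init(struct device_node *node,
 *                                        struct device_node *parent)
 *      {
 *              ...
 *              set_handle_irq(my_irqchip_handle_irq);
 *              return 0;
 *      }
 */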