linux/kernel/irq/handle.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code. Detailed
 * information is available in Documentation/core-api/genericirq.rst
 *
 */

#include <linux/irq.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/irq_regs.h>

#include <trace/events/irq.h>

#include "internals.h"

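/*
 * handle_arch_irq is the architecture's root interrupt handler when the
 * generic multi handler mechanism is in use. It is installed once during
 * early boot via set_handle_irq() below and is read-only afterwards.
 */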
#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
void (*handle_arch_irq)(struct pt_regs *) __ro_after_init;
#endif

/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @desc:      description of the interrupt
 *
 * Handles spurious and unhandled IRQs. It also prints a debug message.
 */
void handle_bad_irq(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);

	print_irq_desc(irq, desc);
	kstat_incr_irqs_this_cpu(desc);
	ack_bad_irq(irq);
}
EXPORT_SYMBOL_GPL(handle_bad_irq);

/*
 * Special, empty irq handler, for interrupt lines which need an action
 * installed but no actual handling:
 */
irqreturn_t no_action(int cpl, void *dev_id)
{
	return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(no_action);

static void warn_no_thread(unsigned int irq, struct irqaction *action)
{
	if (test_and_set_bit(IRQTF_WARNED, &action->thread_flags))
		return;

	printk(KERN_WARNING "IRQ %d device %s returned IRQ_WAKE_THREAD "
	       "but no thread function available.\n", irq, action->name);
}

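/*
 * A primary handler requests its threaded counterpart by returning
 * IRQ_WAKE_THREAD. The driver side pairing looks roughly like the sketch
 * below (illustrative only, not part of this file; foo_hardirq(),
 * foo_thread_fn() and the foo_* helpers are hypothetical):
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// hypothetical check
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// quiesce the device
 *		return IRQ_WAKE_THREAD;		// wake foo_thread_fn()
 *	}
 *
 *	err = request_threaded_irq(irq, foo_hardirq, foo_thread_fn, 0,
 *				   "foo", foo);
 *
 * Returning IRQ_WAKE_THREAD without having registered a thread function
 * triggers warn_no_thread() above. __irq_wake_thread() below performs the
 * actual wakeup and the associated bookkeeping.
 */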
void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
{
	/*
	 * In case the thread crashed and was killed we just pretend that
	 * we handled the interrupt. The hardirq handler has disabled the
	 * device interrupt, so no irq storm is lurking.
	 */
	if (action->thread->flags & PF_EXITING)
		return;

	/*
	 * Wake up the handler thread for this action. If the
	 * RUNTHREAD bit is already set, nothing to do.
	 */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * It's safe to OR the mask lockless here. We have only two
	 * places which write to threads_oneshot: This code and the
	 * irq thread.
	 *
	 * This code is the hard irq context and can never run on two
	 * cpus in parallel. If it ever does we have more serious
	 * problems than this bitmask.
	 *
	 * The irq threads of this irq which clear their "running" bit
	 * in threads_oneshot are serialized via desc->lock against
	 * each other and they are serialized against this code by
	 * IRQS_INPROGRESS.
	 *
	 * Hard irq handler:
	 *
	 *	spin_lock(desc->lock);
	 *	desc->state |= IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *	set_bit(IRQTF_RUNTHREAD, &action->thread_flags);
	 *	desc->threads_oneshot |= mask;
	 *	spin_lock(desc->lock);
	 *	desc->state &= ~IRQS_INPROGRESS;
	 *	spin_unlock(desc->lock);
	 *
	 * irq thread:
	 *
	 * again:
	 *	spin_lock(desc->lock);
	 *	if (desc->state & IRQS_INPROGRESS) {
	 *		spin_unlock(desc->lock);
	 *		while (desc->state & IRQS_INPROGRESS)
	 *			cpu_relax();
	 *		goto again;
	 *	}
	 *	if (!test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
	 *		desc->threads_oneshot &= ~mask;
	 *	spin_unlock(desc->lock);
	 *
	 * So either the thread waits for us to clear IRQS_INPROGRESS
	 * or we are waiting in the flow handler for desc->lock to be
	 * released before we reach this point. The thread also checks
	 * IRQTF_RUNTHREAD under desc->lock. If set it leaves
	 * threads_oneshot untouched and runs the thread another time.
	 */
	desc->threads_oneshot |= action->thread_mask;

	/*
	 * We increment the threads_active counter in case we wake up
	 * the irq thread. The irq thread decrements the counter when
	 * it returns from the handler or in the exit path and wakes
	 * up waiters which are stuck in synchronize_irq() when the
	 * active count becomes zero. synchronize_irq() is serialized
	 * against this code (hard irq handler) via IRQS_INPROGRESS
	 * like the finalize_oneshot() code. See comment above.
	 */
	atomic_inc(&desc->threads_active);

	wake_up_process(action->thread);
}

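/*
 * __handle_irq_event_percpu - walk the action chain of @desc and invoke
 * each primary handler, waking the associated irq thread when a handler
 * returns IRQ_WAKE_THREAD. The IRQF_* flags of the actions which handled
 * the interrupt are accumulated in @flags for the caller's entropy
 * accounting.
 */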
irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
{
	irqreturn_t retval = IRQ_NONE;
	unsigned int irq = desc->irq_data.irq;
	struct irqaction *action;

	record_irq_time(desc);

	for_each_action_of_desc(desc, action) {
		irqreturn_t res;

		/*
		 * If this IRQ would be threaded under force_irqthreads,
		 * mark it so.
		 */
		if (irq_settings_can_thread(desc) &&
		    !(action->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT)))
			lockdep_hardirq_threaded();

		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, action->dev_id);
		trace_irq_handler_exit(irq, action, res);

		if (WARN_ONCE(!irqs_disabled(), "irq %u handler %pS enabled interrupts\n",
			      irq, action->handler))
			local_irq_disable();

		switch (res) {
		case IRQ_WAKE_THREAD:
			/*
			 * Catch drivers which return WAKE_THREAD but
			 * did not set up a thread function
			 */
			if (unlikely(!action->thread_fn)) {
				warn_no_thread(irq, action);
				break;
			}

			__irq_wake_thread(desc, action);

			fallthrough;	/* to add to randomness */
		case IRQ_HANDLED:
			*flags |= action->flags;
			break;

		default:
			break;
		}

		retval |= res;
	}

	return retval;
}

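/*
 * handle_irq_event_percpu - run the primary handlers of @desc, feed the
 * entropy pool and, unless the descriptor is marked no-debug, let
 * note_interrupt() watch for spurious interrupts.
 */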
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
	irqreturn_t retval;
	unsigned int flags = 0;

	retval = __handle_irq_event_percpu(desc, &flags);

	add_interrupt_randomness(desc->irq_data.irq, flags);

	if (!irq_settings_no_debug(desc))
		note_interrupt(desc, retval);
	return retval;
}

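/*
 * handle_irq_event - handle one interrupt event for @desc
 *
 * Called with desc->lock held. Clears IRQS_PENDING, marks the interrupt
 * in progress and drops the lock across the handler invocations, so the
 * primary handlers run with interrupts disabled but without the
 * descriptor lock. Returns with desc->lock reacquired.
 */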
irqreturn_t handle_irq_event(struct irq_desc *desc)
{
	irqreturn_t ret;

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	ret = handle_irq_event_percpu(desc);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	return ret;
}

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
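/*
 * set_handle_irq - install the root interrupt handler invoked by the
 * architecture's low level entry code. The handler can only be set
 * once; further attempts fail with -EBUSY.
 */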
int __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
	if (handle_arch_irq)
		return -EBUSY;

	handle_arch_irq = handle_irq;
	return 0;
}

/**
 * generic_handle_arch_irq - root irq handler for architectures which do no
 *                           entry accounting themselves
 * @regs:	Register file coming from the low-level handling code
 */
asmlinkage void noinstr generic_handle_arch_irq(struct pt_regs *regs)
{
	struct pt_regs *old_regs;

	irq_enter();
	old_regs = set_irq_regs(regs);
	handle_arch_irq(regs);
	set_irq_regs(old_regs);
	irq_exit();
}
#endif