linux/kernel/irq/migration.c
// SPDX-License-Identifier: GPL-2.0

#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

/**
 * irq_fixup_move_pending - Cleanup irq move pending from a dying CPU
 * @desc:               Interrupt descriptor to clean up
 * @force_clear:        If set, clear the move pending bit unconditionally.
 *                      If not set, clear it only when the dying CPU is the
 *                      last one in the pending mask.
 *
 * Returns true if the pending bit was set and the pending mask contains an
 * online CPU other than the dying CPU.
 */
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
        struct irq_data *data = irq_desc_get_irq_data(desc);

        if (!irqd_is_setaffinity_pending(data))
                return false;

        /*
         * The outgoing CPU might be the last online target in a pending
         * interrupt move. If that's the case, clear the pending move bit.
         */
        if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
                irqd_clr_move_pending(data);
                return false;
        }
        if (force_clear)
                irqd_clr_move_pending(data);
        return true;
}
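
/*
 * Illustrative caller, loosely modeled on the CPU hotplug migration
 * path (migrate_one_irq() in kernel/irq/cpuhotplug.c). A sketch only;
 * the exact sequence differs between kernel versions:
 *
 *      struct irq_data *d = irq_desc_get_irq_data(desc);
 *      const struct cpumask *affinity;
 *
 *      if (irq_fixup_move_pending(desc, true))
 *              affinity = irq_desc_get_pending_mask(desc);
 *      else
 *              affinity = irq_data_get_affinity_mask(d);
 *
 *      ... then retarget the interrupt to an online CPU in affinity ...
 */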
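/**
 * irq_move_masked_irq - Carry out a pending interrupt affinity change
 * @idata:      Interrupt data of the interrupt to move
 *
 * Applies the affinity change recorded in desc->pending_mask via
 * irq_do_set_affinity() and clears the pending state. Must be called
 * with desc->lock held and, as the comment below explains, with the
 * interrupt masked by the caller.
 */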
void irq_move_masked_irq(struct irq_data *idata)
{
        struct irq_desc *desc = irq_data_to_desc(idata);
        struct irq_chip *chip = desc->irq_data.chip;

        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;

        irqd_clr_move_pending(&desc->irq_data);

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (irqd_is_per_cpu(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }

        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;

        if (!chip->irq_set_affinity)
                return;

        assert_raw_spin_locked(&desc->lock);

        /*
         * If there is a valid mask to work with, do the
         * disable, re-program, enable sequence.
         * This is *not* particularly important for level triggered
         * interrupts, but in an edge trigger case we might be setting
         * the RTE when an active trigger is coming in. This could
         * cause some ioapics to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
                irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

        cpumask_clear(desc->pending_mask);
}
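/**
 * irq_move_irq - Mask an interrupt if necessary and carry out a pending move
 * @idata:      Interrupt data of the interrupt to move
 *
 * Wrapper around irq_move_masked_irq() for callers which do not hold the
 * interrupt masked: the line is masked around the move when required and
 * the previous mask state is restored afterwards.
 */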
void irq_move_irq(struct irq_data *idata)
{
        bool masked;

        /*
         * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
         * enabled; the lookup is optimized away when it is disabled, which
         * avoids an "#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY" here.
         */
        idata = irq_desc_get_irq_data(irq_data_to_desc(idata));

        if (likely(!irqd_is_setaffinity_pending(idata)))
                return;

        if (unlikely(irqd_irq_disabled(idata)))
                return;

        /*
         * Be careful vs. already masked interrupts. If this is a
         * threaded interrupt with ONESHOT set, we can end up with an
         * interrupt storm.
         */
        masked = irqd_irq_masked(idata);
        if (!masked)
                idata->chip->irq_mask(idata);
        irq_move_masked_irq(idata);
        if (!masked)
                idata->chip->irq_unmask(idata);
}
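
/*
 * Illustrative only: a hypothetical irqchip ack callback showing where
 * irq_move_irq() is typically invoked, i.e. from flow handling on the
 * CPU that still receives the interrupt. The my_chip_*() names are
 * made up for this sketch and are not part of this file:
 *
 *      static void my_chip_ack(struct irq_data *d)
 *      {
 *              my_chip_hw_ack(d);      (made-up hardware ack/EOI)
 *              irq_move_irq(d);        (perform any pending move)
 *      }
 */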