linux/kernel/irq/migration.c
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

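/**
 * irq_move_masked_irq - Perform a pending affinity change on a masked irq
 * @idata:	interrupt data of the irq to move
 *
 * Programs the affinity recorded in desc->pending_mask into the irq
 * chip and clears the pending mask. The caller must hold desc->lock
 * and have the interrupt masked.
 */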
void irq_move_masked_irq(struct irq_data *idata)
{
        struct irq_desc *desc = irq_data_to_desc(idata);
        struct irq_chip *chip = idata->chip;

        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;

        /*
         * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
         */
        if (!irqd_can_balance(&desc->irq_data)) {
                WARN_ON(1);
                return;
        }

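        /*
         * Clear the pending flag first; pending_mask itself is cleared
         * after the new affinity has been programmed below.
         */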
        irqd_clr_move_pending(&desc->irq_data);

        if (unlikely(cpumask_empty(desc->pending_mask)))
                return;

        if (!chip->irq_set_affinity)
                return;

        assert_raw_spin_locked(&desc->lock);

        /*
         * If there is a valid mask to work with, do the
         * disable, re-program, enable sequence.
         * This is *not* particularly important for level triggered
         * interrupts, but in an edge trigger case we might be
         * programming the RTE while an active trigger is coming in,
         * which could cause some ioapics to malfunction.
         * Being paranoid, I guess!
         *
         * For correct operation this depends on the caller
         * masking the irqs.
         */
        if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
                irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

        cpumask_clear(desc->pending_mask);
}

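/**
 * irq_move_irq - Move an interrupt if an affinity change is pending
 * @idata:	interrupt data of the irq to move
 *
 * Masks the interrupt if it is not masked already, performs the
 * pending affinity change via irq_move_masked_irq() and unmasks it
 * again. Meant to be called from the irq chip's ack path.
 */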
void irq_move_irq(struct irq_data *idata)
{
        bool masked;

        if (likely(!irqd_is_setaffinity_pending(idata)))
                return;

        if (unlikely(irqd_irq_disabled(idata)))
                return;

        /*
         * Be careful vs. already masked interrupts. If this is a
         * threaded interrupt with ONESHOT set, we can end up with an
         * interrupt storm.
         */
        masked = irqd_irq_masked(idata);
        if (!masked)
                idata->chip->irq_mask(idata);
        irq_move_masked_irq(idata);
        if (!masked)
                idata->chip->irq_unmask(idata);
}
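
/*
 * Illustrative sketch (not part of this file): how an irq chip's edge
 * ack path typically drives the deferred migration above. The function
 * and chip callback names below are hypothetical, for illustration
 * only.
 *
 *	static void example_ack_edge(struct irq_data *data)
 *	{
 *		irq_move_irq(data);	// perform any pending move now,
 *					// while this edge is being handled
 *		example_chip_ack(data);	// then ack at the controller
 *	}
 */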