// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *			migration without losing interrupts for iosapic
 *			architecture.
 */

#include <asm/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/mca.h>
/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n",
	       irq, smp_processor_id());
}
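
/*
 * Note: this handler only logs the spurious vector; the irq_err_count
 * statistic declared below is not updated here.
 */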

#ifdef CONFIG_IA64_GENERIC
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

unsigned int __ia64_local_vector_to_irq(ia64_vector vec)
{
	return __this_cpu_read(vector_irq[vec]);
}
#endif
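
/*
 * Illustrative sketch (an assumption, not code from this file): for a
 * vector currently bound on the local CPU, the two helpers above act as
 * inverses, i.e.
 *
 *	ia64_vector vec = __ia64_irq_to_vector(irq);
 *	BUG_ON(__ia64_local_vector_to_irq(vec) != irq);
 *
 * holds as long as the irq<->vector binding stays stable on this CPU.
 */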

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
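
/*
 * Example: with the "%10u" format above, the summary line rendered into
 * /proc/interrupts looks like, e.g.:
 *
 *	ERR:         42
 */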

#ifdef CONFIG_SMP
static char irq_redir[NR_IRQS]; /* = { [0 ... NR_IRQS-1] = 1 }; */

void set_irq_affinity_info(unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_get_affinity_mask(irq),
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}

#endif /* CONFIG_SMP */
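
/*
 * Hedged usage sketch (the call site below is an assumption, not code in
 * this file): a platform rerouting path such as the iosapic driver would
 * record a new destination after reprogramming the RTE, roughly as
 *
 *	set_irq_affinity_info(irq, cpu_physical_id(new_cpu), redirect);
 *
 * so that /proc/irq/<irq>/smp_affinity reflects the actual target CPU.
 */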

int __init arch_early_irq_init(void)
{
	ia64_mca_irq_init();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask is already updated, we just need to check for
 * IRQs whose affinity mask no longer contains any online CPU.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local interrupt sources, such as
		 * ITV, CPEI, MCA, etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

		/*
		 * cpumask_any_and() returns nr_cpu_ids when the masks do
		 * not intersect, i.e. when no online CPU is left in the
		 * affinity mask.
		 */
		if (cpumask_any_and(irq_data_get_affinity_mask(data),
				    cpu_online_mask) >= nr_cpu_ids) {
			/*
			 * Save it for the later phases of fixup_irqs()
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three callbacks are essential; currently
			 * WARN_ON, but maybe this should panic?
			 */
			if (chip && chip->irq_disable &&
			    chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON(!chip || !chip->irq_disable ||
					!chip->irq_enable ||
					!chip->irq_set_affinity);
			}
		}
	}
}
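
/*
 * Hedged context sketch (the caller shown is an assumption, not defined
 * in this file): fixup_irqs() is meant to run on the dying CPU after it
 * has been cleared from cpu_online_mask, e.g. from a CPU-offline path
 * shaped roughly like:
 *
 *	int __cpu_disable(void)
 *	{
 *		...
 *		set_cpu_online(smp_processor_id(), false);
 *		fixup_irqs();
 *		...
 *	}
 */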
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable the timer (bit 16 is the mask bit) */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk(KERN_INFO "CPU %d is now promoted to time-keeper master\n",
		       time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * the local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and re-programmed.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq] = 0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We disable interrupts and raise the
	 * external task priority via max_xtp() to ensure no more interrupts
	 * are routed to this processor. However, the local timer interrupt
	 * can still have one instance pending, which is handled in
	 * timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif