linux/arch/x86/kernel/irq_32.c
/*
 *      Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};
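
/*
 * The layout mirrors a normal task stack: the thread_info sits at the
 * bottom of the THREAD_SIZE area and the stack grows down from the top,
 * so current_thread_info() keeps working while running on one of these
 * per-CPU IRQ stacks.
 */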

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        /* high bit used in ret_from_ code */
        int irq = ~regs->orig_eax;
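        /*
         * The low-level entry stub stores the IRQ number one's-complemented
         * in orig_eax, so the saved value is negative; that high bit is what
         * the ret_from_ code tests, and the ~ above recovers the IRQ number.
         */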
        struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
        u32 *isp;
#endif

        if (unlikely((unsigned)irq >= NR_IRQS)) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                                        __FUNCTION__, irq);
                BUG();
        }

        old_regs = set_irq_regs(regs);
        irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                long esp;

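                /*
                 * The stack is THREAD_SIZE-aligned, so masking %esp with
                 * THREAD_SIZE - 1 yields the stack pointer's offset within
                 * the stack area.  thread_info lives at the bottom of that
                 * area, so an offset within STACK_WARN bytes of it means
                 * the stack is close to overflowing.
                 */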
                __asm__ __volatile__("andl %%esp,%0" :
                                        "=r" (esp) : "0" (THREAD_SIZE - 1));
                if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                esp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

#ifdef CONFIG_4KSTACKS

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (curctx != irqctx) {
                int arg1, arg2, ebx;

                /* build the stack frame on the IRQ stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_esp = current_stack_pointer;
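                /*
                 * previous_esp records where the interrupted stack left
                 * off; stack dumpers use it to walk from the IRQ stack
                 * back onto the task stack.
                 */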

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

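                /*
                 * Switch to the IRQ stack and call the handler there.
                 * The i386 fastcall convention passes the first two
                 * arguments in %eax and %edx, which is why irq and desc
                 * are tied to those registers below, while %ebx holds the
                 * new stack top.  xchgl loads the IRQ stack pointer into
                 * %esp and saves the old %esp in %ebx in one step; the
                 * final movl restores the original stack once the handler
                 * (called indirectly through %edi) returns.
                 */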
                asm volatile(
                        "       xchgl  %%ebx,%%esp      \n"
                        "       call   *%%edi           \n"
                        "       movl   %%ebx,%%esp      \n"
                        : "=a" (arg1), "=d" (arg2), "=b" (ebx)
                        :  "0" (irq),   "1" (desc),  "2" (isp),
                           "D" (desc->handle_irq)
                        : "memory", "cc"
                );
        } else
#endif
                desc->handle_irq(irq, desc);

        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_4KSTACKS

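/*
 * The per-CPU IRQ stacks live in .bss.page_aligned: with 4K stacks
 * THREAD_SIZE equals PAGE_SIZE, so page alignment keeps every stack
 * THREAD_SIZE-aligned, which current_thread_info() and the stack
 * overflow check above depend on.
 */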
static char softirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__section__(".bss.page_aligned")));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
                cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

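/*
 * With 4K stacks the architecture provides its own do_softirq() so that
 * pending softirqs run on the dedicated per-CPU softirq stack instead of
 * whatever shallow task stack happens to be current.
 */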
asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));

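                /*
                 * Run __do_softirq() on the softirq stack: %ebx carries
                 * the new stack top, xchgl swaps it with %esp (saving the
                 * old stack pointer in %ebx), and the movl afterwards
                 * switches back.  __do_softirq() is asmlinkage and takes
                 * no arguments; eax/ecx/edx are listed as clobbers because
                 * the called function may overwrite them.
                 */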
                asm volatile(
                        "       xchgl   %%ebx,%%esp     \n"
                        "       call    __do_softirq    \n"
                        "       movl    %%ebx,%%esp     \n"
                        : "=b"(isp)
                        : "0"(isp)
                        : "memory", "cc", "edx", "ecx", "eax"
                );
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}
#endif

/*
 * Interrupt statistics:
 */

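/* Reported as the ERR: line by show_interrupts() below. */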
atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                unsigned any_count = 0;

                spin_lock_irqsave(&irq_desc[i].lock, flags);
#ifndef CONFIG_SMP
                any_count = kstat_irqs(i);
#else
                for_each_online_cpu(j)
                        any_count |= kstat_cpu(j).irqs[i];
#endif
                action = irq_desc[i].action;
                if (!action && !any_count)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%-8s", irq_desc[i].name);

                if (action) {
                        seq_printf(p, "  %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).apic_timer_irqs);
                seq_printf(p, "  Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
                seq_printf(p, "RES: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_resched_count);
                seq_printf(p, "  Rescheduling interrupts\n");
                seq_printf(p, "CAL: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_call_count);
                seq_printf(p, "  function call interrupts\n");
                seq_printf(p, "TLB: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_tlb_count);
                seq_printf(p, "  TLB shootdowns\n");
#endif
                seq_printf(p, "TRM: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_thermal_count);
                seq_printf(p, "  Thermal event interrupts\n");
                seq_printf(p, "SPU: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat,j).irq_spurious_count);
                seq_printf(p, "  Spurious interrupts\n");
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

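/*
 * Called by the CPU hotplug code when a CPU is being taken offline:
 * 'map' holds the CPUs that remain available, and every IRQ whose
 * affinity would otherwise point at the departing CPU is steered onto
 * the surviving ones.  IRQ 2 (the cascade) is left alone.
 */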
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                if (irq == 2)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif
