linux/arch/x86/include/asm/hardirq.h
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
        unsigned int __softirq_pending; /* pending softirq bitmask */
        unsigned int __nmi_count;       /* arch dependent */
        unsigned int irq0_irqs;         /* IRQ0 (timer) interrupts */
#ifdef CONFIG_X86_LOCAL_APIC
        unsigned int apic_timer_irqs;   /* arch dependent */
        unsigned int irq_spurious_count; /* spurious APIC interrupts */
#endif
        unsigned int generic_irqs;      /* arch dependent */
        unsigned int apic_perf_irqs;    /* perf monitoring interrupts */
        unsigned int apic_pending_irqs;
#ifdef CONFIG_SMP
        unsigned int irq_resched_count; /* reschedule IPIs */
        unsigned int irq_call_count;    /* function call IPIs */
        unsigned int irq_tlb_count;     /* TLB shootdown IPIs */
#endif
#ifdef CONFIG_X86_MCE
        unsigned int irq_thermal_count; /* thermal event interrupts */
# ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count; /* MCE threshold interrupts */
# endif
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
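
/*
 * Each CPU owns one cache-line-aligned irq_cpustat_t.  Handlers bump the
 * counters through inc_irq_stat() below; a counter for the current CPU
 * can be read with this kernel's percpu accessors, e.g. (illustrative)
 * percpu_read(irq_stat.irq_resched_count).
 */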

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)    percpu_add(irq_stat.member, 1)
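
/*
 * Usage sketch (illustrative): an interrupt handler increments its own
 * per-CPU counter with a single percpu add, e.g. the reschedule IPI
 * handler does
 *	inc_irq_stat(irq_resched_count);
 */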

#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)  percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)   percpu_or(irq_stat.__softirq_pending, (x))
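
/*
 * Defining __ARCH_IRQ_STAT and __ARCH_SET_SOFTIRQ_PENDING tells the
 * generic code (<linux/irq_cpustat.h> and <linux/interrupt.h>) to use the
 * per-CPU accessors above instead of its default irq_stat[] array, so
 * e.g. (illustrative) raising a softirq ends up doing
 *	or_softirq_pending(1UL << TIMER_SOFTIRQ);
 */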

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu       arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat           arch_irq_stat
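
/*
 * arch_irq_stat_cpu() and arch_irq_stat(), implemented elsewhere in the
 * arch code, sum the counters above so generic code such as the
 * /proc/stat "intr" total can fold them in; the self-referencing
 * #defines let that code detect the hooks with #ifdef.
 */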

#endif /* _ASM_X86_HARDIRQ_H */