#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>


#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
extern void synchronize_hardirq(unsigned int irq);
#else
# define synchronize_irq(irq)		barrier()
# define synchronize_hardirq(irq)	barrier()
#endif

#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()						\
	do {							\
		account_irq_enter_time(current);		\
		add_preempt_count(HARDIRQ_OFFSET);		\
		trace_hardirq_enter();				\
	} while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()						\
	do {							\
		trace_hardirq_exit();				\
		account_irq_exit_time(current);			\
		sub_preempt_count(HARDIRQ_OFFSET);		\
	} while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

#define nmi_enter()						\
	do {							\
		lockdep_off();					\
		ftrace_nmi_enter();				\
		BUG_ON(in_nmi());				\
		add_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		rcu_nmi_enter();				\
		trace_hardirq_enter();				\
	} while (0)

#define nmi_exit()						\
	do {							\
		trace_hardirq_exit();				\
		rcu_nmi_exit();					\
		BUG_ON(!in_nmi());				\
		sub_preempt_count(NMI_OFFSET + HARDIRQ_OFFSET);	\
		ftrace_nmi_exit();				\
		lockdep_on();					\
	} while (0)

#endif /* LINUX_HARDIRQ_H */