linux/include/linux/hardirq.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H

#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h>

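/*
 * Wait for in-flight handlers of an interrupt line to finish:
 * synchronize_irq() waits for pending handlers of @irq to complete,
 * while synchronize_hardirq() only covers the hard IRQ part and
 * returns false if a threaded handler is still active.
 */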
extern void synchronize_irq(unsigned int irq);
extern bool synchronize_hardirq(unsigned int irq);

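/*
 * Tiny RCU (the !SMP, !PREEMPT implementation) does not track NMI
 * nesting, so its NMI hooks are empty stubs; the other RCU flavours
 * provide real rcu_nmi_enter()/rcu_nmi_exit() implementations.
 */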
#if defined(CONFIG_TINY_RCU)

static inline void rcu_nmi_enter(void)
{
}

static inline void rcu_nmi_exit(void)
{
}

#else
extern void rcu_nmi_enter(void);
extern void rcu_nmi_exit(void);
#endif

/*
 * It is safe to do non-atomic ops on ->hardirq_context,
 * because NMI handlers may not preempt and the ops are
 * always balanced, so the interrupted value of ->hardirq_context
 * will always be restored.
 */
#define __irq_enter()                                   \
        do {                                            \
                account_irq_enter_time(current);        \
                preempt_count_add(HARDIRQ_OFFSET);      \
                trace_hardirq_enter();                  \
        } while (0)

/*
 * Enter irq context (on NO_HZ, update jiffies):
 */
extern void irq_enter(void);

/*
 * Exit irq context without processing softirqs:
 */
#define __irq_exit()                                    \
        do {                                            \
                trace_hardirq_exit();                   \
                account_irq_exit_time(current);         \
                preempt_count_sub(HARDIRQ_OFFSET);      \
        } while (0)

/*
 * Exit irq context and process softirqs if needed:
 */
extern void irq_exit(void);

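/*
 * Architectures may define arch_nmi_enter()/arch_nmi_exit() to do
 * extra work around NMI entry and exit; the default is a no-op.
 */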
#ifndef arch_nmi_enter
#define arch_nmi_enter()        do { } while (0)
#define arch_nmi_exit()         do { } while (0)
#endif

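/*
 * nmi_enter()/nmi_exit() bracket an NMI handler: they mark NMI and
 * hardirq context in the preempt count, turn lockdep off, invoke the
 * printk and ftrace NMI hooks and let RCU know that an NMI is in
 * progress.  nmi_exit() undoes the same steps in reverse order.
 */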
#define nmi_enter()                                             \
        do {                                                    \
                arch_nmi_enter();                               \
                printk_nmi_enter();                             \
                lockdep_off();                                  \
                ftrace_nmi_enter();                             \
                BUG_ON(in_nmi());                               \
                preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
                rcu_nmi_enter();                                \
                trace_hardirq_enter();                          \
        } while (0)

#define nmi_exit()                                              \
        do {                                                    \
                trace_hardirq_exit();                           \
                rcu_nmi_exit();                                 \
                BUG_ON(!in_nmi());                              \
                preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
                ftrace_nmi_exit();                              \
                lockdep_on();                                   \
                printk_nmi_exit();                              \
                arch_nmi_exit();                                \
        } while (0)

#endif /* LINUX_HARDIRQ_H */