linux/arch/x86/include/asm/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
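
/*
 * Illustrative note, not part of the original header: the value
 * returned above is a raw EFLAGS image, so the interrupt-enable bit
 * can be tested with X86_EFLAGS_IF from <asm/processor-flags.h>:
 *
 *	disabled = !(native_save_fl() & X86_EFLAGS_IF);
 *
 * which is exactly the check done by raw_irqs_disabled_flags() below.
 */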

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

/*
 * Enable interrupts and halt in one step; "sti" only takes effect
 * after the next instruction, so no interrupt can be delivered (and
 * lost) between the "sti" and the "hlt".
 */
static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

/* Halt without changing the interrupt flag. */
static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long __raw_local_save_flags(void)
{
        return native_save_fl();
}

static inline void raw_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void raw_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void raw_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; "sti" only takes effect after the next
 * instruction, so the "hlt" that follows it is reached before any
 * interrupt can be delivered:
 */
static inline void raw_safe_halt(void)
{
        native_safe_halt();
}
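
/*
 * Illustrative sketch, not part of the original header: roughly how an
 * idle loop uses the helper above.  need_resched() is assumed to be
 * available to the caller; it is not provided here.
 *
 *	raw_local_irq_disable();
 *	if (!need_resched())
 *		raw_safe_halt();	// enables IRQs and halts with no wakeup window
 *	else
 *		raw_local_irq_enable();
 */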

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long __raw_local_irq_save(void)
{
        unsigned long flags = __raw_local_save_flags();

        raw_local_irq_disable();

        return flags;
}
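
/*
 * Illustrative sketch, not part of the original header: the usual
 * pairing for a short critical section, as spinlock-style code uses it:
 *
 *	unsigned long flags;
 *
 *	flags = __raw_local_irq_save();	// save EFLAGS, then cli
 *	...critical section...
 *	raw_local_irq_restore(flags);	// IF restored to its saved state
 */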
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

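/*
 * Illustrative sketch, an assumption rather than part of this header:
 * entry code invokes these with a paravirt clobber argument, which the
 * native definitions above simply ignore, e.g.
 *
 *	DISABLE_INTERRUPTS(CLBR_NONE)
 *	...
 *	ENABLE_INTERRUPTS(CLBR_NONE)
 *
 * CLBR_NONE is defined by the paravirt headers, not here.
 */
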
#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So either we find a way around the problem or
 * just fault and emulate if a guest tries to call swapgs
 * directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
#define raw_local_save_flags(flags)                             \
        do { (flags) = __raw_local_save_flags(); } while (0)

#define raw_local_irq_save(flags)                               \
        do { (flags) = __raw_local_irq_save(); } while (0)

static inline int raw_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int raw_irqs_disabled(void)
{
        unsigned long flags = __raw_local_save_flags();

        return raw_irqs_disabled_flags(flags);
}
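
/*
 * Illustrative sketch, not part of the original header: the macros and
 * helpers above combined to protect a region and inspect the saved
 * state:
 *
 *	unsigned long flags;
 *	int was_disabled;
 *
 *	raw_local_irq_save(flags);	// old EFLAGS in flags, IRQs now off
 *	was_disabled = raw_irqs_disabled_flags(flags);
 *	...critical section...
 *	raw_local_irq_restore(flags);
 */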

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */
