linux/arch/x86/include/asm/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__
/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}
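
/*
 * Minimal usage sketch (illustrative only, not additional API):
 * native_save_fl()/native_restore_fl() round-trip the full EFLAGS
 * word through the stack, so a caller can snapshot the interrupt
 * state and put it back later:
 *
 *      unsigned long flags = native_save_fl();
 *      native_irq_disable();
 *      ... work with interrupts off ...
 *      native_restore_fl(flags);
 *
 * Kernel code normally goes through the arch_local_*() wrappers
 * further down rather than calling the native_*() helpers directly.
 */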

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

static inline void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__

static inline unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti only takes effect after the instruction
 * that follows it, so no interrupt can sneak in between the sti and
 * the hlt -- a pending wakeup is taken while the CPU is in hlt:
 */
static inline void arch_safe_halt(void)
{
        native_safe_halt();
}
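
/*
 * Sketch of the race that the combined "sti; hlt" avoids (illustrative
 * pseudo-sequence, not real kernel code):
 *
 *      arch_local_irq_enable();
 *              <-- interrupt fires here, queues work, returns
 *      native_halt();
 *              <-- CPU now halts even though work is pending
 *
 * Because sti defers interrupt delivery until after the following
 * instruction, arch_safe_halt() has no such window: a pending
 * interrupt is delivered once the hlt executes and wakes the CPU.
 */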

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
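
/*
 * Typical usage sketch (illustrative only): the returned flags word is
 * meant to be handed back to arch_local_irq_restore(), so the pattern
 * nests correctly even when interrupts were already disabled:
 *
 *      unsigned long flags = arch_local_irq_save();
 *      ... critical section that must not race with interrupts ...
 *      arch_local_irq_restore(flags);
 *
 * Most code reaches these through the local_irq_save() /
 * local_irq_restore() wrappers in <linux/irqflags.h>.
 */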
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli
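
/*
 * Assumed usage sketch: the (x) argument is a paravirt clobber
 * specification (for example CLBR_NONE from the paravirt headers) and
 * is simply ignored by the native definitions above.  Entry assembly
 * would use the macros roughly as:
 *
 *      DISABLE_INTERRUPTS(CLBR_NONE)
 *      ...
 *      ENABLE_INTERRUPTS(CLBR_NONE)
 *
 * CLBR_NONE is not defined in this file and is shown only as an
 * example.
 */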

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
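
/*
 * Usage sketch (illustrative only): both helpers test the IF bit
 * (X86_EFLAGS_IF), either in a caller-supplied flags word or in the
 * current EFLAGS, and are typically used for assertions, e.g.:
 *
 *      WARN_ON_ONCE(!arch_irqs_disabled());
 *
 * Generic code normally uses the irqs_disabled()/irqs_disabled_flags()
 * wrappers from <linux/irqflags.h> instead.
 */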

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */