linux/arch/x86/include/asm/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
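
/*
 * Functions tagged __cpuidle land in the .cpuidle.text section, whose
 * boundaries let the kernel recognize a CPU that is parked in the idle
 * path (see cpu_in_idle()); the halt helpers below therefore carry the
 * attribute.
 */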

/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}

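/*
 * Restore EFLAGS from a value previously read by native_save_fl().
 * "popf" rewrites the whole flags register, so @flags must come from a
 * matching save; the "memory" and "cc" clobbers keep the compiler from
 * caching memory or condition-code state across the restore.
 */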
static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}

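/*
 * "cli" clears EFLAGS.IF, masking maskable interrupts on this CPU; the
 * "memory" clobber doubles as a compiler barrier so accesses are not
 * reordered into or out of the protected region.
 */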
static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

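/*
 * "sti" sets EFLAGS.IF, re-enabling maskable interrupts on this CPU.
 */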
static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

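/*
 * "sti" has a one-instruction interrupt shadow: interrupts are held off
 * until after the *next* instruction. That makes "sti; hlt" atomic --
 * an interrupt arriving in between still wakes the "hlt" instead of
 * being consumed before the CPU goes to sleep.
 */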
static inline __cpuidle void native_safe_halt(void)
{
        asm volatile("sti; hlt": : :"memory");
}

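/*
 * Plain "hlt" without touching IF: with interrupts disabled, only an
 * NMI, SMI, INIT or reset brings the CPU back, which is what shutdown
 * paths want.
 */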
static inline __cpuidle void native_halt(void)
{
        asm volatile("hlt": : :"memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

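/*
 * Without CONFIG_PARAVIRT the arch_local_* API maps straight onto the
 * native helpers above. The notrace annotation matters: these functions
 * run inside the function tracer itself, so letting ftrace instrument
 * them would recurse.
 */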
static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; "sti" only takes effect after the following
 * instruction, so the sti/hlt pair cannot lose a wakeup interrupt:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
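
/*
 * Typical usage (via the local_irq_save()/local_irq_restore() wrappers
 * in <linux/irqflags.h>):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section, interrupts off ...
 *	local_irq_restore(flags);
 *
 * Restoring the saved flags (rather than unconditionally enabling)
 * keeps nesting safe: if interrupts were already off, they stay off.
 */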
#else

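/*
 * Assembly-side macros. With paravirt enabled these become patchable
 * call sites; in the native case they are just the raw instructions.
 * The (x) argument is the paravirt clobber specification and is unused
 * here.
 */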
#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

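/*
 * Native return-to-user paths: INTERRUPT_RETURN goes through
 * native_iret, while the USERGS_SYSRET* variants switch back to the
 * user GS base with "swapgs" before returning via sysret.
 */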
#define INTERRUPT_RETURN        jmp native_iret
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl

#else
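/*
 * 32-bit: "sti; sysexit" relies on the same one-instruction sti shadow
 * as native_safe_halt(), so a pending interrupt is taken only after the
 * return to user mode completes.
 */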
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif /* CONFIG_X86_64 */

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
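/*
 * EFLAGS.IF (X86_EFLAGS_IF, bit 9) set means interrupts are enabled;
 * "disabled" is therefore the inverted test.
 */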
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
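/*
 * Irq-tracing hooks for assembly code: the *_thunk helpers preserve all
 * registers around the call into lockdep's trace_hardirqs_on()/off(),
 * and compile away entirely when CONFIG_TRACE_IRQFLAGS is off.
 */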
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT            call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ \
        TRACE_IRQS_ON; \
        sti; \
        call lockdep_sys_exit_thunk; \
        cli; \
        TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */