linux/arch/x86/include/asm/irqflags.h
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

#include <linux/static_key.h>
#include <asm/nospec-branch.h>

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

static inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
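
/*
 * Illustrative sketch (not part of the original header): the value
 * returned by native_save_fl() is the raw EFLAGS image, so callers
 * can test the interrupt flag directly against X86_EFLAGS_IF from
 * <asm/processor-flags.h>, exactly as arch_irqs_disabled_flags()
 * does below:
 *
 *        if (native_save_fl() & X86_EFLAGS_IF)
 *                ... interrupts are currently enabled ...
 */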

static inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     :"g" (flags)
                     :"memory", "cc");
}
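
/*
 * Note that "push %0 ; popf" reloads the entire saved EFLAGS image,
 * not just IF; the "cc" clobber is needed because popf rewrites the
 * condition-code bits as well.  The flags argument is expected to be
 * a value previously returned by native_save_fl().
 */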

static inline void native_irq_disable(void)
{
        asm volatile("cli": : :"memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti": : :"memory");
}

static inline __cpuidle void native_safe_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("sti; hlt": : :"memory");
}
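
/*
 * mds_idle_clear_cpu_buffers() flushes CPU buffers before the CPU
 * idles, as part of the MDS (Microarchitectural Data Sampling)
 * mitigation: stale data must not be left where a sibling
 * hyperthread could sample it while this CPU is halted.
 */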

static inline __cpuidle void native_halt(void)
{
        mds_idle_clear_cpu_buffers();
        asm volatile("hlt": : :"memory");
}

#endif /* __ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti enables interrupts only after the
 * instruction that follows it, so the sti;hlt pair in
 * native_safe_halt() cannot be separated by an interrupt:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}
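
/*
 * A simplified sketch of the classic idle-loop pattern (compare the
 * kernel's default_idle(); details differ across versions):
 *
 *        local_irq_disable();
 *        if (!need_resched())
 *                arch_safe_halt();       (re-enables IRQs, then halts)
 *        else
 *                local_irq_enable();
 */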

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
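
/*
 * Typical pairing, sketched (generic code reaches these helpers via
 * raw_local_irq_save()/raw_local_irq_restore() in <linux/irqflags.h>):
 *
 *        unsigned long flags;
 *
 *        flags = arch_local_irq_save();
 *        ... critical section, IRQs off on this CPU ...
 *        arch_local_irq_restore(flags);
 *
 * Restoring the saved flags instead of unconditionally re-enabling
 * keeps the pattern safe when entered with interrupts already off.
 */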
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        iretq
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32             \
        swapgs;                                 \
        sti;                                    \
        sysexit

#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
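
/*
 * Example (sketch): code that must run with interrupts off can assert
 * its context with
 *
 *        WARN_ON_ONCE(!arch_irqs_disabled());
 *
 * Generic code normally reaches these helpers through the
 * irqs_disabled() wrapper in <linux/irqflags.h>.
 */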

#else

#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT           call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ       \
        TRACE_IRQS_ON; \
        sti; \
        SAVE_REST; \
        LOCKDEP_SYS_EXIT; \
        RESTORE_REST; \
        cli; \
        TRACE_IRQS_OFF;

#else
#define ARCH_LOCKDEP_SYS_EXIT                   \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;

#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT      ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ  ARCH_LOCKDEP_SYS_EXIT_IRQ
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif

#endif /* __ASSEMBLY__ */
#endif /* _X86_IRQFLAGS_H_ */