linux/arch/x86/include/asm/irqflags.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _X86_IRQFLAGS_H_
#define _X86_IRQFLAGS_H_

#include <asm/processor-flags.h>

#ifndef __ASSEMBLY__

/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))

/*
 * Interrupt control:
 */

/* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
extern inline unsigned long native_save_fl(void);
extern inline unsigned long native_save_fl(void)
{
        unsigned long flags;

        /*
         * "=rm" is safe here, because "pop" adjusts the stack before
         * it evaluates its effective address -- this is part of the
         * documented behavior of the "pop" instruction.
         */
        asm volatile("# __raw_save_flags\n\t"
                     "pushf ; pop %0"
                     : "=rm" (flags)
                     : /* no input */
                     : "memory");

        return flags;
}
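
/*
 * Illustration (not from this file): if the compiler picks the "m"
 * alternative for a stack-resident "flags", the emitted sequence is,
 * with a hypothetical displacement:
 *
 *      pushf
 *      pop     0x10(%rsp)
 *
 * %rsp has already been incremented back by the time the effective
 * address is computed, so an %rsp-relative displacement chosen by
 * the compiler still names the intended slot.
 */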

extern inline void native_restore_fl(unsigned long flags);
extern inline void native_restore_fl(unsigned long flags)
{
        asm volatile("push %0 ; popf"
                     : /* no output */
                     : "g" (flags)
                     : "memory", "cc");
}

static inline void native_irq_disable(void)
{
        asm volatile("cli" : : : "memory");
}

static inline void native_irq_enable(void)
{
        asm volatile("sti" : : : "memory");
}

static inline __cpuidle void native_safe_halt(void)
{
        asm volatile("sti; hlt" : : : "memory");
}

static inline __cpuidle void native_halt(void)
{
        asm volatile("hlt" : : : "memory");
}

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#ifndef __ASSEMBLY__
#include <linux/types.h>

static inline notrace unsigned long arch_local_save_flags(void)
{
        return native_save_fl();
}

static inline notrace void arch_local_irq_restore(unsigned long flags)
{
        native_restore_fl(flags);
}

static inline notrace void arch_local_irq_disable(void)
{
        native_irq_disable();
}

static inline notrace void arch_local_irq_enable(void)
{
        native_irq_enable();
}

/*
 * Used in the idle loop; sti takes one instruction cycle
 * to complete:
 */
static inline __cpuidle void arch_safe_halt(void)
{
        native_safe_halt();
}
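
/*
 * "sti" keeps interrupts masked for one further instruction (the STI
 * interrupt shadow), so the "sti; hlt" pair in native_safe_halt()
 * cannot be separated by an interrupt: a wakeup that arrived while
 * IRQs were disabled is delivered only once "hlt" is executing, and
 * therefore still wakes the CPU instead of being lost.
 */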

/*
 * Used when interrupts are already enabled or to
 * shut down the processor:
 */
static inline __cpuidle void halt(void)
{
        native_halt();
}

/*
 * For spinlocks, etc:
 */
static inline notrace unsigned long arch_local_irq_save(void)
{
        unsigned long flags = arch_local_save_flags();
        arch_local_irq_disable();
        return flags;
}
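
/*
 * Typical usage (hypothetical caller, not part of this header):
 *
 *      unsigned long flags;
 *
 *      flags = arch_local_irq_save();
 *      ... critical section, interrupts off ...
 *      arch_local_irq_restore(flags);
 *
 * Restoring the saved EFLAGS.IF bit re-enables interrupts only if
 * they were enabled before the save, so these pairs nest safely.
 */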
#else

#define ENABLE_INTERRUPTS(x)    sti
#define DISABLE_INTERRUPTS(x)   cli

#ifdef CONFIG_X86_64
#define SWAPGS  swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK     swapgs

#define PARAVIRT_ADJUST_EXCEPTION_FRAME /*  */

#define INTERRUPT_RETURN        jmp native_iret
#define USERGS_SYSRET64                         \
        swapgs;                                 \
        sysretq;
#define USERGS_SYSRET32                         \
        swapgs;                                 \
        sysretl

#ifdef CONFIG_DEBUG_ENTRY
#define SAVE_FLAGS(x)           pushfq; popq %rax
#endif
#else
#define INTERRUPT_RETURN                iret
#define ENABLE_INTERRUPTS_SYSEXIT       sti; sysexit
#define GET_CR0_INTO_EAX                movl %cr0, %eax
#endif

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */

#ifndef __ASSEMBLY__
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
        return !(flags & X86_EFLAGS_IF);
}

static inline int arch_irqs_disabled(void)
{
        unsigned long flags = arch_local_save_flags();

        return arch_irqs_disabled_flags(flags);
}
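
/*
 * Illustration (hypothetical caller): a flags value returned by
 * arch_local_save_flags() or arch_local_irq_save() can be tested
 * later without re-reading the live EFLAGS register:
 *
 *      unsigned long flags = arch_local_save_flags();
 *
 *      if (arch_irqs_disabled_flags(flags))
 *              ... the caller already ran with IRQs off ...
 */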
#endif /* !__ASSEMBLY__ */

#ifdef __ASSEMBLY__
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON         call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF        call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  ifdef CONFIG_X86_64
#    define LOCKDEP_SYS_EXIT            call lockdep_sys_exit_thunk
#    define LOCKDEP_SYS_EXIT_IRQ \
        TRACE_IRQS_ON; \
        sti; \
        call lockdep_sys_exit_thunk; \
        cli; \
        TRACE_IRQS_OFF;
#  else
#    define LOCKDEP_SYS_EXIT \
        pushl %eax;                             \
        pushl %ecx;                             \
        pushl %edx;                             \
        call lockdep_sys_exit;                  \
        popl %edx;                              \
        popl %ecx;                              \
        popl %eax;
#    define LOCKDEP_SYS_EXIT_IRQ
#  endif
#else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
#endif
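
/*
 * Sketch of intended use from entry code (hypothetical fragment,
 * not taken from any real entry path):
 *
 *      DISABLE_INTERRUPTS(CLBR_ANY)
 *      TRACE_IRQS_OFF
 *      ...
 *      LOCKDEP_SYS_EXIT
 *      TRACE_IRQS_ON
 *      ENABLE_INTERRUPTS(CLBR_ANY)
 *
 * The macros expand to nothing when the corresponding debug options
 * are disabled, so the fast path pays no cost.
 */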
#endif /* __ASSEMBLY__ */

#endif /* _X86_IRQFLAGS_H_ */
 210