linux/arch/x86/include/asm/mwait.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MWAIT_H
#define _ASM_X86_MWAIT_H

#include <linux/sched.h>
#include <linux/sched/idle.h>

#include <asm/cpufeature.h>

#define MWAIT_SUBSTATE_MASK             0xf
#define MWAIT_CSTATE_MASK               0xf
#define MWAIT_SUBSTATE_SIZE             4
#define MWAIT_HINT2CSTATE(hint)         (((hint) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK)
#define MWAIT_HINT2SUBSTATE(hint)       ((hint) & MWAIT_CSTATE_MASK)
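/*
 * An MWAIT hint (passed in EAX) carries the target C-state in bits [7:4]
 * and the sub-state in bits [3:0]; for example, MWAIT_HINT2CSTATE(0x21) is
 * 2 and MWAIT_HINT2SUBSTATE(0x21) is 1.
 */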

#define CPUID_MWAIT_LEAF                5
#define CPUID5_ECX_EXTENSIONS_SUPPORTED 0x1
#define CPUID5_ECX_INTERRUPT_BREAK      0x2
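/*
 * In CPUID leaf 5, ECX bit 0 reports that the MONITOR/MWAIT extensions are
 * enumerated and ECX bit 1 reports that MWAIT can treat interrupts as break
 * events even when interrupts are disabled, which is what
 * MWAIT_ECX_INTERRUPT_BREAK below asks for.
 */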

#define MWAIT_ECX_INTERRUPT_BREAK       0x1
#define MWAITX_ECX_TIMER_ENABLE         BIT(1)
#define MWAITX_MAX_LOOPS                ((u32)-1)
/* The MWAITX C-state hint sits in EAX[7:4]; a hint of 0xf disables C-states */
#define MWAITX_DISABLE_CSTATES          0xf0

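/*
 * The MONITOR/MWAIT family below is emitted as raw .byte opcodes so the
 * header builds even with assemblers that do not know the mnemonics;
 * MONITORX/MWAITX are the AMD variants.
 */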
static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
{
        /* "monitor %eax, %ecx, %edx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc8;"
                     :: "a" (eax), "c" (ecx), "d"(edx));
}

static inline void __monitorx(const void *eax, unsigned long ecx,
                              unsigned long edx)
{
        /* "monitorx %eax, %ecx, %edx;" */
        asm volatile(".byte 0x0f, 0x01, 0xfa;"
                     :: "a" (eax), "c" (ecx), "d"(edx));
}

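/*
 * __mwait() must be paired with a preceding __monitor() on the address to be
 * watched; the wait ends on a store to the monitored cache line, on an
 * interrupt (if enabled, or unconditionally with MWAIT_ECX_INTERRUPT_BREAK in
 * ECX), or spuriously, so callers must re-check their wakeup condition.
 */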
static inline void __mwait(unsigned long eax, unsigned long ecx)
{
        /* "mwait %eax, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}

/*
 * MWAITX allows for a timer expiration to get the core out of a wait state in
 * addition to the default MWAIT exit condition of a store appearing at a
 * monitored virtual address.
 *
 * Registers:
 *
 * MWAITX ECX[1]: enable timer if set
 * MWAITX EBX[31:0]: max wait time expressed in SW P0 clocks. The software P0
 * frequency is the same as the TSC frequency.
 *
 * Below is a comparison between MWAIT and MWAITX on AMD processors:
 *
 *                 MWAIT                           MWAITX
 * opcode          0f 01 c9           |            0f 01 fb
 * ECX[0]                  value of RFLAGS.IF seen by instruction
 * ECX[1]          unused/#GP if set  |            enable timer if set
 * ECX[31:2]                     unused/#GP if set
 * EAX                           unused (reserved for hint)
 * EBX[31:0]       unused             |            max wait time (P0 clocks)
 *
 *                 MONITOR                         MONITORX
 * opcode          0f 01 c8           |            0f 01 fa
 * EAX                     (logical) address to monitor
 * ECX                     #GP if not zero
 */
static inline void __mwaitx(unsigned long eax, unsigned long ebx,
                            unsigned long ecx)
{
        /* "mwaitx %eax, %ebx, %ecx;" */
        asm volatile(".byte 0x0f, 0x01, 0xfb;"
                     :: "a" (eax), "b" (ebx), "c" (ecx));
}

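/*
 * Illustrative sketch (not part of this header) of a bounded wait using the
 * MWAITX timer, in the style of delay_mwaitx() in arch/x86/lib/delay.c: arm
 * MONITORX on a dummy per-CPU cache line, then issue MWAITX with the timer
 * enabled so the core wakes after at most 'cycles' TSC-frequency clocks even
 * if nothing ever writes to the monitored line. The variable names here are
 * examples only:
 *
 *      unsigned long flag;     // dummy monitor target
 *
 *      __monitorx(&flag, 0, 0);
 *      __mwaitx(MWAITX_DISABLE_CSTATES, cycles, MWAITX_ECX_TIMER_ENABLE);
 */

/*
 * Like __mwait(), but issued in the one-instruction interrupt shadow of STI:
 * interrupts are re-enabled, yet cannot be taken until the MWAIT has started,
 * so a pending wakeup interrupt exits the MWAIT instead of being handled in a
 * window before it (the classic "sti; hlt" idiom).
 */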
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
        trace_hardirqs_on();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
}

/*
 * This uses the MONITOR/MWAIT instructions, introduced with P4 processors
 * that have PNI (SSE3), which can obviate the IPI otherwise needed to trigger
 * a check of need_resched.  We execute MONITOR against need_resched and enter
 * an optimized wait state through MWAIT.  Whenever someone changes
 * need_resched, we are woken up from MWAIT (without an IPI).
 *
 * Starting with Core Duo processors, MWAIT can take hints based on CPU
 * capability.
 */
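/*
 * Two errata are handled below: on CPUs with X86_BUG_CLFLUSH_MONITOR (Xeon
 * 7400), the monitored cache line must be flushed with CLFLUSH, fenced on
 * both sides, before MONITOR or the wakeup can be missed.  On CPUs with
 * X86_BUG_MONITOR (Goldmont), MWAIT may fail to notice a store to the
 * monitored line, so the TIF polling optimization is skipped and remote
 * wakeups fall back to a resched IPI, which still breaks the MWAIT.
 */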
static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
        if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
                if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
                        mb();
                        clflush((void *)&current_thread_info()->flags);
                        mb();
                }

                __monitor((void *)&current_thread_info()->flags, 0, 0);
                if (!need_resched())
                        __mwait(eax, ecx);
        }
        current_clr_polling();
}
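/*
 * Typical use (illustrative only): a cpuidle driver such as intel_idle passes
 * the MWAIT hint of the chosen C-state and asks for interrupts to act as
 * break events even while they are disabled, e.g.:
 *
 *      mwait_idle_with_hints(hint, MWAIT_ECX_INTERRUPT_BREAK);
 */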

#endif /* _ASM_X86_MWAIT_H */