linux/arch/x86/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_BARRIER_H
#define _ASM_X86_BARRIER_H

#include <asm/alternative.h>
#include <asm/nops.h>

/*
 * Force strict CPU ordering.
 * And yes, this might be required on UP too when we're talking
 * to devices.
 */

#ifdef CONFIG_X86_32
#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
				      X86_FEATURE_XMM2) ::: "memory", "cc")
#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
				       X86_FEATURE_XMM2) ::: "memory", "cc")
#else
#define mb()	asm volatile("mfence" ::: "memory")
#define rmb()	asm volatile("lfence" ::: "memory")
#define wmb()	asm volatile("sfence" ::: "memory")
#endif

/**
 * array_index_mask_nospec() - generate a mask that is ~0UL when the
 *	bounds check succeeds and 0 otherwise
 * @index: array element index
 * @size: number of elements in array
 *
 * Returns:
 *     0 - (index < size)
 */
static inline unsigned long array_index_mask_nospec(unsigned long index,
		unsigned long size)
{
	unsigned long mask;

	/*
	 * CMP computes index - size and sets CF when index < size;
	 * SBB of a register with itself then yields 0 - CF, i.e.
	 * ~0UL when the bounds check succeeds and 0 when it fails.
	 */
	asm volatile ("cmp %1,%2; sbb %0,%0;"
			: "=r" (mask)
			: "g" (size), "r" (index)
			: "cc");
	return mask;
}
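
/*
 * Illustrative sketch only (not part of the upstream header): this is
 * roughly how a caller such as array_index_nospec() in linux/nospec.h
 * combines the mask with the index, so that a mispredicted
 * out-of-bounds index is clamped to 0 instead of steering a
 * speculative load.  The helper name below is hypothetical.
 */
static inline unsigned long example_clamp_index_nospec(unsigned long index,
							unsigned long size)
{
	/* mask is ~0UL when index < size, 0 otherwise */
	unsigned long mask = array_index_mask_nospec(index, size);

	/* ANDing with the mask forces an out-of-bounds index to 0 */
	return index & mask;
}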

/* Override the default implementation from linux/nospec.h. */
#define array_index_mask_nospec array_index_mask_nospec

/* Prevent speculative execution past this barrier. */
#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
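
/*
 * Illustrative sketch only (hypothetical helper, not upstream code):
 * barrier_nospec() is typically placed between a bounds/validity check
 * and the first dependent use of the checked value, so the CPU cannot
 * speculate past a failed check with an attacker-chosen value.
 */
static inline int example_use_after_check(unsigned long idx, unsigned long max)
{
	if (idx >= max)
		return -1;	/* real code would return e.g. -EINVAL */

	/* no speculative use of idx beyond this point if the check failed */
	barrier_nospec();

	/* ... idx may now be used as an array index ... */
	return 0;
}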

#define dma_rmb()	barrier()
#define dma_wmb()	barrier()
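
/*
 * Illustrative sketch only, loosely following the descriptor example in
 * Documentation/memory-barriers.txt; the struct, constant and helper
 * below are hypothetical.  On x86, cacheable (coherent DMA) memory is
 * already strongly ordered, which is why dma_rmb()/dma_wmb() reduce to
 * compiler barriers above.
 */
#define EXAMPLE_DEVICE_OWN	1	/* hypothetical ownership flag */

struct example_desc {
	unsigned int status;
	unsigned int data;
};

static inline unsigned int example_recycle_desc(struct example_desc *desc,
						unsigned int write_data)
{
	unsigned int read_data = 0;

	if (desc->status != EXAMPLE_DEVICE_OWN) {
		/* do not read the payload until we own the descriptor */
		dma_rmb();

		/* read/modify the payload */
		read_data = desc->data;
		desc->data = write_data;

		/* flush the payload update before handing ownership back */
		dma_wmb();

		/* give the descriptor back to the device */
		desc->status = EXAMPLE_DEVICE_OWN;
	}
	return read_data;
}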

#define __smp_mb()	asm volatile("lock; addl $0,-4(%%" _ASM_SP ")" ::: "memory", "cc")

#define __smp_rmb()	dma_rmb()
#define __smp_wmb()	barrier()
#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})

/* Atomic operations are already serializing on x86 */
#define __smp_mb__before_atomic()	do { } while (0)
#define __smp_mb__after_atomic()	do { } while (0)

#include <asm-generic/barrier.h>
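
/*
 * Illustrative sketch only (hypothetical variables and helpers): the
 * classic message-passing pattern these primitives implement.  Callers
 * use the smp_store_release()/smp_load_acquire() wrappers that
 * asm-generic/barrier.h (included above) builds from the __smp_*
 * definitions; on x86 both sides compile down to plain MOVs plus a
 * compiler barrier, because the hardware already orders cacheable
 * loads and stores strongly enough.
 */
static inline void example_publish(int *data, int *ready)
{
	*data = 42;			/* initialize the payload ...	*/
	smp_store_release(ready, 1);	/* ... then publish it		*/
}

static inline int example_consume(int *data, int *ready)
{
	if (smp_load_acquire(ready))	/* if the flag is seen ...	*/
		return *data;		/* ... the payload is too	*/
	return -1;			/* not published yet		*/
}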

/*
 * Make previous memory operations globally visible before
 * a WRMSR.
 *
 * MFENCE makes writes visible, but only affects load/store
 * instructions.  WRMSR is unfortunately not a load/store
 * instruction and is unaffected by MFENCE.  The LFENCE ensures
 * that the WRMSR is not reordered.
 *
 * Most WRMSRs are full serializing instructions themselves and
 * do not require this barrier.  This is only required for the
 * IA32_TSC_DEADLINE and X2APIC MSRs.
 */
static inline void weak_wrmsr_fence(void)
{
	asm volatile("mfence; lfence" : : : "memory");
}
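
/*
 * Illustrative sketch only, simplified from the TSC-deadline timer
 * code in arch/x86/kernel/apic/apic.c (lapic_next_deadline()); the
 * delta handling is made up, and the caller is assumed to have
 * <asm/msr.h> available for wrmsrl()/rdtsc().
 */
static inline void example_arm_tsc_deadline(u64 delta)
{
	/* IA32_TSC_DEADLINE is not a serializing WRMSR: fence first */
	weak_wrmsr_fence();

	wrmsrl(MSR_IA32_TSC_DEADLINE, rdtsc() + delta);
}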

#endif /* _ASM_X86_BARRIER_H */