linux/include/asm-generic/barrier.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop()   asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()    barrier()
#endif

#ifndef rmb
#define rmb()   mb()
#endif

#ifndef wmb
#define wmb()   mb()
#endif

#ifndef dma_rmb
#define dma_rmb()       rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()       wmb()
#endif
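
/*
 * Illustrative sketch (not part of this header's API; guarded out of the
 * build behind the hypothetical BARRIER_EXAMPLES macro): dma_wmb() orders
 * writes to a coherent DMA descriptor so the device cannot observe the
 * ownership flag before the payload fields. The struct and field names are
 * made up; real drivers typically pair this with an MMIO doorbell write
 * (e.g. writel()), which provides its own ordering on most architectures.
 */
#ifdef BARRIER_EXAMPLES
struct example_dma_desc {
        unsigned long long addr;        /* DMA address of the buffer */
        unsigned int len;               /* length of the buffer */
        unsigned int status;            /* ownership flag polled by the device */
};

static inline void example_post_desc(struct example_dma_desc *desc,
                                     unsigned long long addr, unsigned int len)
{
        desc->addr = addr;
        desc->len = len;
        dma_wmb();                      /* publish payload before ownership flag */
        WRITE_ONCE(desc->status, 1);
}
#endif /* BARRIER_EXAMPLES */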

#ifndef __smp_mb
#define __smp_mb()      mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()     rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()     wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()        __smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()       __smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()       __smp_wmb()
#endif

#else   /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()        barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()       barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()       barrier()
#endif

#endif  /* CONFIG_SMP */
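
/*
 * Illustrative sketch (hypothetical names, guarded out of the build): the
 * classic smp_wmb()/smp_rmb() pairing. The writer orders the data store
 * before the flag store; the reader orders the flag load before the data
 * load. Both sides of the pairing are needed for the guarantee to hold.
 */
#ifdef BARRIER_EXAMPLES
static inline void example_writer(int *data, int *flag)
{
        WRITE_ONCE(*data, 42);
        smp_wmb();                      /* order data store before flag store */
        WRITE_ONCE(*flag, 1);
}

static inline int example_reader(int *data, int *flag)
{
        if (READ_ONCE(*flag)) {
                smp_rmb();              /* order flag load before data load */
                return READ_ONCE(*data);
        }
        return -1;
}
#endif /* BARRIER_EXAMPLES */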

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()       __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()        __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v)                                       \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        WRITE_ONCE(*p, v);                                              \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)                                           \
({                                                                      \
        __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);               \
        compiletime_assert_atomic_type(*p);                             \
        __smp_mb();                                                     \
        (typeof(*p))___p1;                                              \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() __smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()  __smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else   /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()  barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)                                             \
({                                                                      \
        __unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p);               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        (typeof(*p))___p1;                                              \
})
#endif

#endif  /* CONFIG_SMP */
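
/*
 * Illustrative sketch (hypothetical names, guarded out of the build): the
 * same message-passing pattern as above, expressed with release/acquire.
 * This is usually cheaper than smp_wmb()/smp_rmb() on weakly ordered
 * architectures and documents which store pairs with which load.
 */
#ifdef BARRIER_EXAMPLES
static inline void example_publish(int *data, int *ready)
{
        WRITE_ONCE(*data, 42);
        smp_store_release(ready, 1);    /* all prior stores visible first */
}

static inline int example_consume(int *data, int *ready)
{
        if (smp_load_acquire(ready))    /* later loads cannot move up */
                return READ_ONCE(*data);
        return -1;
}
#endif /* BARRIER_EXAMPLES */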

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
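
/*
 * Illustrative sketch (hypothetical ring layout, guarded out of the build):
 * a guest publishing an updated producer index into a ring shared with the
 * host must order it after the ring contents even on a UP guest, since the
 * host runs on other CPUs; hence virt_store_release() rather than
 * smp_store_release(), which would degrade to a plain barrier() on !SMP.
 */
#ifdef BARRIER_EXAMPLES
struct example_ring {
        unsigned int slots[16];
        unsigned int producer;          /* index read by the host */
};

static inline void example_ring_push(struct example_ring *ring,
                                     unsigned int idx, unsigned int val)
{
        ring->slots[idx & 15] = val;
        virt_store_release(&ring->producer, idx + 1);
}
#endif /* BARRIER_EXAMPLES */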

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering. Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()           smp_rmb()
#endif
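
/*
 * Illustrative sketch (hypothetical names, guarded out of the build): spin
 * until a flag is set, then upgrade the control dependency to ACQUIRE so
 * that subsequent loads of the data protected by the flag are ordered too.
 */
#ifdef BARRIER_EXAMPLES
static inline int example_wait_for_flag(int *flag, int *data)
{
        while (!READ_ONCE(*flag))
                ;                       /* control dependency: LOAD->STORE only */
        smp_acquire__after_ctrl_dep();  /* adds LOAD->LOAD: full ACQUIRE */
        return READ_ONCE(*data);
}
#endif /* BARRIER_EXAMPLES */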

/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Because C lacks lambda expressions, the current value of *@ptr is made
 * available inside @cond_expr through the pre-named variable @VAL.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({                \
        typeof(ptr) __PTR = (ptr);                              \
        __unqual_scalar_typeof(*ptr) VAL;                       \
        for (;;) {                                              \
                VAL = READ_ONCE(*__PTR);                        \
                if (cond_expr)                                  \
                        break;                                  \
                cpu_relax();                                    \
        }                                                       \
        (typeof(*ptr))VAL;                                      \
})
#endif
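
/*
 * Usage sketch (hypothetical names, guarded out of the build): wait until a
 * lock word becomes zero. @VAL inside the conditional is the freshly loaded
 * value provided by the macro; the macro itself calls cpu_relax() between
 * iterations, so the caller does not need to.
 */
#ifdef BARRIER_EXAMPLES
static inline unsigned int example_wait_unlocked(unsigned int *lockval)
{
        return smp_cond_load_relaxed(lockval, VAL == 0);
}
#endif /* BARRIER_EXAMPLES */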

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable, but
 * exploits the control dependency of the wait to reduce the barrier cost on
 * many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({                \
        __unqual_scalar_typeof(*ptr) _val;                      \
        _val = smp_cond_load_relaxed(ptr, cond_expr);           \
        smp_acquire__after_ctrl_dep();                          \
        (typeof(*ptr))_val;                                     \
})
#endif
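
/*
 * Usage sketch (hypothetical names, guarded out of the build): wait for a
 * producer to set @ready, then read the payload; the ACQUIRE provided by
 * the cond-load orders the payload read after the flag read, so no extra
 * barrier is needed here.
 */
#ifdef BARRIER_EXAMPLES
static inline int example_wait_and_read(int *ready, int *data)
{
        (void)smp_cond_load_acquire(ready, VAL != 0);
        return READ_ONCE(*data);
}
#endif /* BARRIER_EXAMPLES */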

/*
 * pmem_wmb() ensures that all stores to persistent storage made by
 * preceding instructions have reached persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb()      wmb()
#endif
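
/*
 * Illustrative sketch (hypothetical names, guarded out of the build): after
 * writing a record into a persistent-memory mapped buffer, issue pmem_wmb()
 * before publishing a commit flag, so the flag cannot reach the media ahead
 * of the data. Real pmem code must also push the stores to the persistence
 * domain (e.g. via memcpy_flushcache()); that step is elided here.
 */
#ifdef BARRIER_EXAMPLES
static inline void example_pmem_commit(unsigned long *pmem_data,
                                       unsigned long *pmem_flag,
                                       unsigned long val)
{
        WRITE_ONCE(*pmem_data, val);
        pmem_wmb();                     /* data reaches media before flag */
        WRITE_ONCE(*pmem_flag, 1);
}
#endif /* BARRIER_EXAMPLES */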

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */