linux/include/asm-generic/barrier.h
/*
 * Generic barrier definitions, originally based on MN10300 definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>

#ifndef nop
#define nop()	asm volatile ("nop")
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb()	barrier()
#endif

#ifndef rmb
#define rmb()	mb()
#endif

#ifndef wmb
#define wmb()	mb()
#endif

#ifndef dma_rmb
#define dma_rmb()	rmb()
#endif

#ifndef dma_wmb
#define dma_wmb()	wmb()
#endif
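
/*
 * Illustrative sketch, not part of this header: a driver publishing a DMA
 * descriptor can use dma_wmb() to make the payload visible to the device
 * before handing over ownership.  The names desc, dma_addr, len and
 * OWN_BIT below are hypothetical.
 *
 *	desc->addr  = cpu_to_le64(dma_addr);
 *	desc->len   = cpu_to_le16(len);
 *	dma_wmb();
 *	desc->flags = cpu_to_le16(OWN_BIT);
 *
 * The dma_wmb() orders the payload stores before the ownership store as
 * observed by the device.
 */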

#ifndef read_barrier_depends
#define read_barrier_depends()		do { } while (0)
#endif
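
/*
 * Illustrative sketch, not part of this header: read_barrier_depends()
 * matters on architectures (such as Alpha) that can reorder dependent
 * loads.  Reading a pointer and then dereferencing it looks like this;
 * gp and field are hypothetical:
 *
 *	p = READ_ONCE(gp);
 *	read_barrier_depends();
 *	val = p->field;
 *
 * On most architectures the barrier is a no-op, hence the empty default.
 */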

#ifndef __smp_mb
#define __smp_mb()	mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb()	rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb()	wmb()
#endif

#ifndef __smp_read_barrier_depends
#define __smp_read_barrier_depends()	read_barrier_depends()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb()	__smp_mb()
#endif

#ifndef smp_rmb
#define smp_rmb()	__smp_rmb()
#endif

#ifndef smp_wmb
#define smp_wmb()	__smp_wmb()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	__smp_read_barrier_depends()
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb()	barrier()
#endif

#ifndef smp_rmb
#define smp_rmb()	barrier()
#endif

#ifndef smp_wmb
#define smp_wmb()	barrier()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()	do { } while (0)
#endif

#endif	/* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic()	__smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic()	__smp_mb()
#endif
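
/*
 * Illustrative sketch, not part of this header: non-value-returning
 * atomics such as atomic_inc() are not fully ordered on some
 * architectures, so callers needing full ordering bracket them with the
 * smp_mb__{before,after}_atomic() wrappers mapped below.  obj and its
 * refs field are hypothetical.
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&obj->refs);
 *	smp_mb__after_atomic();
 */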

#ifndef __smp_store_release
#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_mb();							\
	___p1;								\
})
#endif
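
/*
 * Illustrative sketch, not part of this header: the classic
 * message-passing pattern pairs a store-release with a load-acquire
 * (using the smp_* wrappers mapped below).  data and ready are
 * hypothetical.
 *
 * Producer (CPU 0):
 *	data = 42;
 *	smp_store_release(&ready, 1);
 *
 * Consumer (CPU 1):
 *	while (!smp_load_acquire(&ready))
 *		cpu_relax();
 *	BUG_ON(data != 42);
 *
 * The release orders the store to data before the store to ready; the
 * acquire orders the load of ready before the load of data.
 */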

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value)  __smp_store_mb(var, value)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	__smp_mb__before_atomic()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	__smp_mb__after_atomic()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) __smp_store_release(p, v)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else	/* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic()	barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic()	barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v)						\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
#endif

#endif	/* CONFIG_SMP */

/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() __smp_mb()
#define virt_rmb() __smp_rmb()
#define virt_wmb() __smp_wmb()
#define virt_read_barrier_depends() __smp_read_barrier_depends()
#define virt_store_mb(var, value) __smp_store_mb(var, value)
#define virt_mb__before_atomic() __smp_mb__before_atomic()
#define virt_mb__after_atomic() __smp_mb__after_atomic()
#define virt_store_release(p, v) __smp_store_release(p, v)
#define virt_load_acquire(p) __smp_load_acquire(p)
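
/*
 * Illustrative sketch, not part of this header: a guest publishing a ring
 * entry to the hypervisor must order against the host even when the guest
 * itself is UP, hence virt_* rather than smp_* (which compile away on
 * !CONFIG_SMP).  ring, MASK, idx, entry and avail_idx are hypothetical.
 *
 *	ring[idx & MASK] = entry;
 *	virt_store_release(&avail_idx, idx + 1);
 */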

/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE ordering; the additional RMB
 * provides LOAD->LOAD ordering.  Together they provide LOAD->{LOAD,STORE}
 * ordering, i.e. (load-)ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep()		smp_rmb()
#endif
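
/*
 * Illustrative sketch, not part of this header: after a conditional
 * branch on a loaded value, subsequent stores are already ordered by the
 * control dependency; smp_acquire__after_ctrl_dep() upgrades this to
 * order subsequent loads as well.  flag and data are hypothetical.
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	smp_acquire__after_ctrl_dep();
 *	val = data;
 */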

/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable, but
 * employs the control dependency of the wait to reduce the barrier on many
 * platforms.
 *
 * Because C lacks lambda expressions, the value of *ptr is loaded into a
 * pre-named variable @VAL for use in @cond.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({		\
	typeof(ptr) __PTR = (ptr);				\
	typeof(*ptr) VAL;					\
	for (;;) {						\
		VAL = READ_ONCE(*__PTR);			\
		if (cond_expr)					\
			break;					\
		cpu_relax();					\
	}							\
	smp_acquire__after_ctrl_dep();				\
	VAL;							\
})
#endif
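
/*
 * Illustrative usage, not part of this header: spin until another CPU
 * clears a lock bit, with ACQUIRE ordering on exit.  VAL is the
 * macro-provided variable; lock and LOCKED_BIT are hypothetical.
 *
 *	smp_cond_load_acquire(&lock->val, !(VAL & LOCKED_BIT));
 */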

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */