linux/arch/powerpc/include/asm/barrier.h
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *      across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
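/*
 * Illustrative sketch, not part of this header: because the plain mb()/wmb()
 * variants use sync, they order cacheable stores against non-cacheable (MMIO)
 * stores as well.  A typical (hypothetical) driver pattern fills a descriptor
 * in memory and only then pokes the device; desc, dma_addr and doorbell below
 * are made-up names.
 *
 *	desc->addr = cpu_to_le64(dma_addr);
 *	desc->len  = cpu_to_le32(len);
 *	wmb();				make the descriptor visible first
 *	__raw_writel(1, doorbell);	then kick the device
 */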

#define smp_store_mb(var, value)        do { WRITE_ONCE(var, value); mb(); } while (0)

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()      __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()       __lwsync()
#define dma_wmb()       __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
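/*
 * Illustrative sketch, not part of this header: dma_rmb()/dma_wmb() order
 * accesses to coherent DMA memory shared with a device without paying for a
 * full sync.  The descriptor layout and the DESC_* flags below are
 * hypothetical.
 *
 *	if (le32_to_cpu(desc->status) & DESC_DONE) {
 *		dma_rmb();			read status before the payload
 *		len = le32_to_cpu(desc->len);
 *	}
 *
 *	desc->addr = cpu_to_le64(dma_addr);
 *	dma_wmb();				fill fields before handing over
 *	desc->status = cpu_to_le32(DESC_HW_OWNED);
 */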

#ifdef CONFIG_SMP
#define smp_lwsync()    __lwsync()

#define smp_mb()        mb()
#define smp_rmb()       __lwsync()
#define smp_wmb()       __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#else
#define smp_lwsync()    barrier()

#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif /* CONFIG_SMP */
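/*
 * Illustrative sketch, not part of this header: the smp_ variants only have
 * to order cacheable accesses between CPUs, which is why lwsync/eieio are
 * sufficient for smp_rmb()/smp_wmb() while smp_mb() still needs sync.  A
 * minimal (hypothetical) producer/consumer pairing:
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (READ_ONCE(ready)) {
 *	smp_wmb();				smp_rmb();
 *	WRITE_ONCE(ready, 1);			BUG_ON(data != 42);
 *					}
 */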

#define read_barrier_depends()          do { } while (0)
#define smp_read_barrier_depends()      do { } while (0)

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x) \
        asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
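/*
 * Illustrative sketch, not part of this header: data_barrier() can be used
 * when a value loaded from memory must be known before a later access (for
 * instance an MMIO read) is started.  ring, regs and slot are made-up names.
 *
 *	idx = ring->head;			load from cacheable memory
 *	data_barrier(idx);			stall until idx is known
 *	val = in_be32(&regs->slot[idx]);	only then touch the device
 */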

#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        smp_lwsync();                                                   \
        ACCESS_ONCE(*p) = (v);                                          \
} while (0)

#define smp_load_acquire(p)                                             \
({                                                                      \
        typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
        compiletime_assert_atomic_type(*p);                             \
        smp_lwsync();                                                   \
        ___p1;                                                          \
})
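/*
 * Illustrative sketch, not part of this header: release/acquire pairing for
 * passing a message between CPUs.  smp_store_release() orders the earlier
 * message stores before the flag store; smp_load_acquire() orders the flag
 * load before the later message loads.  msg and msg_ready are hypothetical.
 *
 *	writer:					reader:
 *	msg.payload = compute();		if (smp_load_acquire(&msg_ready))
 *	smp_store_release(&msg_ready, 1);		consume(msg.payload);
 */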

#define smp_mb__before_atomic()     smp_mb()
#define smp_mb__after_atomic()      smp_mb()
#define smp_mb__before_spinlock()   smp_mb()
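/*
 * Illustrative sketch, not part of this header: non-value-returning atomics
 * such as atomic_inc() are not barriers by themselves, so callers that need
 * the update ordered against surrounding accesses bracket it explicitly.
 * obj, state and refs are made-up names.
 *
 *	obj->state = NEW_STATE;
 *	smp_mb__before_atomic();	order the state store ...
 *	atomic_inc(&obj->refs);		... before the reference bump
 */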

#endif /* _ASM_POWERPC_BARRIER_H */