linux/arch/metag/include/asm/barrier.h
#ifndef _ASM_METAG_BARRIER_H
#define _ASM_METAG_BARRIER_H

#include <asm/metag_mem.h>

#define nop()           asm volatile ("NOP")

#ifdef CONFIG_METAG_META21

/* HTP and above have a system event to fence writes */
static inline void wr_fence(void)
{
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_FENCE;
        barrier();
        *flushptr = 0;
        barrier();
}

#else /* CONFIG_METAG_META21 */

/*
 * ATP doesn't have a system event to fence writes, so it is necessary to
 * flush the processor write queues as well as possibly the write combiner
 * (depending on the page being written).
 * To ensure the write queues are flushed we do 4 writes to a system event
 * register (in this case the write combiner flush), which also flushes the
 * write combiner.
 */
static inline void wr_fence(void)
{
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_COMBINE_FLUSH;
        barrier();
        *flushptr = 0;
        *flushptr = 0;
        *flushptr = 0;
        *flushptr = 0;
        barrier();
}

#endif /* !CONFIG_METAG_META21 */

/* flush writes through the write combiner */
#define mb()            wr_fence()
#define rmb()           barrier()
#define wmb()           mb()

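/*
 * Barriers for accesses to coherent DMA memory: dma_rmb() orders reads and
 * dma_wmb() orders writes with respect to the device sharing that memory.
 */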
#define dma_rmb()       rmb()
#define dma_wmb()       wmb()

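/*
 * On UP kernels the smp_*() barriers only need to constrain the compiler;
 * on SMP a real fence may be required, depending on whether writes can be
 * reordered externally between hardware threads.
 */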
#ifndef CONFIG_SMP
#define fence()         do { } while (0)
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#else

#ifdef CONFIG_METAG_SMP_WRITE_REORDERING
/*
 * Write to the atomic memory unlock system event register (command 0). This is
 * needed before a write to shared memory in a critical section, to prevent
 * external reordering of writes before the fence on other threads with writes
 * after the fence on this thread (and to prevent the ensuing cache-memory
 * incoherence). It is therefore ineffective if used after and on the same
 * thread as a write.
 */
static inline void fence(void)
{
        volatile int *flushptr = (volatile int *) LINSYSEVENT_WR_ATOMIC_UNLOCK;
        barrier();
        *flushptr = 0;
        barrier();
}
#define smp_mb()        fence()
#define smp_rmb()       fence()
#define smp_wmb()       barrier()
#else
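/*
 * Writes are not reordered externally between hardware threads, so
 * compiler barriers are sufficient for SMP ordering.
 */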
#define fence()         do { } while (0)
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif /* !CONFIG_METAG_SMP_WRITE_REORDERING */
#endif /* CONFIG_SMP */

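/*
 * Data dependency barriers are only required on architectures that can
 * reorder dependent loads (e.g. Alpha), so they are no-ops here.
 */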
#define read_barrier_depends()          do { } while (0)
#define smp_read_barrier_depends()      do { } while (0)

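/* Assign a value and then issue a full SMP memory barrier. */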
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)

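/*
 * Release store: all memory accesses before this point are ordered before
 * the store to *p becomes visible to other threads.
 */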
#define smp_store_release(p, v)                                         \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        ACCESS_ONCE(*p) = (v);                                          \
} while (0)

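/*
 * Acquire load: the load of *p is ordered before all memory accesses that
 * follow this point.
 */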
#define smp_load_acquire(p)                                             \
({                                                                      \
        typeof(*p) ___p1 = ACCESS_ONCE(*p);                             \
        compiletime_assert_atomic_type(*p);                             \
        smp_mb();                                                       \
        ___p1;                                                          \
})

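/* Barriers for use before/after non-value-returning atomic operations. */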
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic()  barrier()

#endif /* _ASM_METAG_BARRIER_H */