linux/tools/arch/ia64/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copied from the kernel sources to tools/:
 *
 * Memory barrier definitions.  This is based on information published
 * in the Processor Abstraction Layer and the System Abstraction Layer
 * manual.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */
#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
#define _TOOLS_LINUX_ASM_IA64_BARRIER_H

#include <linux/compiler.h>

/*
 * Macros to force memory ordering.  In these descriptions, "previous"
 * and "subsequent" refer to program order; "visible" means that all
 * architecturally visible effects of a memory access have occurred
 * (at a minimum, this means the memory has been read or written).
 *
 *   wmb():     Guarantees that all preceding stores to memory-
 *              like regions are visible before any subsequent
 *              stores and that all following stores will be
 *              visible only after all previous stores.
 *   rmb():     Like wmb(), but for reads.
 *   mb():      wmb()/rmb() combo, i.e., all previous memory
 *              accesses are visible before all subsequent
 *              accesses and vice versa.  This is also known as
 *              a "fence."
 *
 * Note: "mb()" and its variants cannot be used as a fence to order
 * accesses to memory mapped I/O registers.  For that, mf.a needs to
 * be used.  However, we don't want to always use mf.a because (a)
 * it's (presumably) much slower than mf and (b) mf.a is supported for
 * sequential memory pages only.
 */

/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
#define ia64_mf()       asm volatile ("mf" ::: "memory")

#define mb()            ia64_mf()
#define rmb()           mb()
#define wmb()           mb()

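/*
 * Illustrative sketch, not part of the original kernel header: the
 * classic producer/consumer pairing for wmb()/rmb().  The names
 * example_data, example_ready, example_publish and example_consume are
 * hypothetical; READ_ONCE()/WRITE_ONCE() come from <linux/compiler.h>
 * included above.
 *
 *      static int example_data;
 *      static int example_ready;
 *
 *      static void example_publish(int val)
 *      {
 *              example_data = val;             // store the payload first
 *              wmb();                          // payload store visible before flag store
 *              WRITE_ONCE(example_ready, 1);   // publish the flag
 *      }
 *
 *      static int example_consume(void)
 *      {
 *              while (!READ_ONCE(example_ready))
 *                      ;                       // wait for the flag
 *              rmb();                          // flag load ordered before payload load
 *              return example_data;
 *      }
 */
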
#define smp_store_release(p, v)                 \
do {                                            \
        barrier();                              \
        WRITE_ONCE(*p, v);                      \
} while (0)

#define smp_load_acquire(p)                     \
({                                              \
        typeof(*p) ___p1 = READ_ONCE(*p);       \
        barrier();                              \
        ___p1;                                  \
})
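
/*
 * Illustrative sketch, not part of the original kernel header: one-way
 * message passing with the smp_store_release()/smp_load_acquire() pair
 * defined above.  The names example_msg, example_flag, example_send and
 * example_recv are hypothetical.
 *
 *      static int example_msg;
 *      static int example_flag;
 *
 *      static void example_send(int val)
 *      {
 *              example_msg = val;                      // payload
 *              smp_store_release(&example_flag, 1);    // publish; payload ordered before flag
 *      }
 *
 *      static int example_recv(void)
 *      {
 *              while (!smp_load_acquire(&example_flag))
 *                      ;                               // once the flag is seen, the payload is too
 *              return example_msg;
 *      }
 */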

#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */