uboot/arch/riscv/include/asm/barrier.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2013 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * Taken from Linux arch/riscv/include/asm/barrier.h, which is based on
 * arch/arm/include/asm/barrier.h
 */

#ifndef _ASM_RISCV_BARRIER_H
#define _ASM_RISCV_BARRIER_H

#ifndef __ASSEMBLY__

#define nop()		__asm__ __volatile__ ("nop")

#define RISCV_FENCE(p, s) \
	__asm__ __volatile__ ("fence " #p "," #s : : : "memory")

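/*
 * For illustration: RISCV_FENCE() stringizes its two arguments into a
 * single FENCE instruction, so mb() below expands to
 *
 *	__asm__ __volatile__ ("fence iorw,iorw" : : : "memory");
 *
 * i.e. a full fence whose predecessor and successor sets both contain
 * device input/output and memory reads/writes.  The "memory" clobber
 * additionally keeps the compiler from reordering accesses across the
 * asm statement.
 */
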
/* These barriers need to enforce ordering on both devices and memory. */
#define mb()		RISCV_FENCE(iorw,iorw)
#define rmb()		RISCV_FENCE(ir,ir)
#define wmb()		RISCV_FENCE(ow,ow)
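
/*
 * Usage sketch (hypothetical descriptor/doorbell names): with a raw MMIO
 * accessor, wmb() is what orders descriptor writes in normal memory
 * before the doorbell write that tells the device to fetch them:
 *
 *	desc->addr = buf_dma;			// ordinary memory stores
 *	desc->len  = len;
 *	wmb();					// "fence ow,ow"
 *	__raw_writel(1, ring->doorbell);	// MMIO (device) store
 *
 * Without the barrier the hart would be free to make the doorbell store
 * visible to the device before the descriptor contents.
 */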

/* These barriers do not need to enforce ordering on devices, just memory. */
#define __smp_mb()	RISCV_FENCE(rw,rw)
#define __smp_rmb()	RISCV_FENCE(r,r)
#define __smp_wmb()	RISCV_FENCE(w,w)
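
/*
 * Usage sketch (hypothetical names): the memory-only fences are enough
 * for the classic message-passing pattern between two harts:
 *
 *	// producer (hart A)
 *	data = 42;
 *	__smp_wmb();			// "fence w,w"
 *	WRITE_ONCE(ready, 1);
 *
 *	// consumer (hart B)
 *	while (!READ_ONCE(ready))
 *		;
 *	__smp_rmb();			// "fence r,r"
 *	val = data;
 *
 * __smp_wmb() keeps the data store visible before the flag store, and
 * __smp_rmb() keeps the flag load ordered before the data load, so the
 * consumer cannot observe ready == 1 and still read stale data.
 */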

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(rw,w);						\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	RISCV_FENCE(r,rw);						\
	___p1;								\
})
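
/*
 * Usage sketch (hypothetical names): a flag/data hand-off written with
 * the acquire/release helpers, which attach the fence to the flag
 * access itself:
 *
 *	// producer (hart A)
 *	data = 42;
 *	__smp_store_release(&ready, 1);		// "fence rw,w", then store
 *
 *	// consumer (hart B)
 *	while (!__smp_load_acquire(&ready))	// load, then "fence r,rw"
 *		;
 *	val = data;
 *
 * The release fence orders every earlier access before the flag store;
 * the acquire fence orders the flag load before every later access.
 */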

/*
 * This is a very specific barrier: it's currently only used in two places in
 * the kernel, both in the scheduler.  See include/linux/spinlock.h for the two
 * orderings it guarantees, but the "critical section is RCsc" guarantee
 * mandates a barrier on RISC-V.  The sequence looks like:
 *
 *    lr.aq lock
 *    sc    lock <= LOCKED
 *    smp_mb__after_spinlock()
 *    // critical section
 *    lr    lock
 *    sc.rl lock <= UNLOCKED
 *
 * The AQ/RL pair provides an RCpc critical section, but there's not really any
 * way we can take advantage of that here because the ordering is only enforced
 * on that one lock.  Thus, we're just doing a full fence.
 */
#define smp_mb__after_spinlock()	RISCV_FENCE(rw,rw)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_BARRIER_H */