linux/arch/sparc/include/asm/barrier_64.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *      call    sym_ccb_from_dsa, 0
 *       movge  %icc, 0, %l0
 *      brz,pn  %o0, .LL1303
 *       mov    %o0, %l2
 *      membar  #LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur.  Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {    __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
                             " membar   " type "\n" \
                             "1:\n" \
                             : : : "memory"); \
} while (0)
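
/* Illustrative expansion (a sketch, not additional API): mb() below invokes
 * membar_safe("#StoreLoad"), which emits roughly
 *
 *      ba,pt   %xcc, 1f
 *       membar #StoreLoad
 *      1:
 *
 * so the membar always sits in the delay slot of a branch that is both
 * always taken and predicted taken, sidestepping the errata sequence above.
 */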

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()    membar_safe("#StoreLoad")
#define rmb()   __asm__ __volatile__("":::"memory")
#define wmb()   __asm__ __volatile__("":::"memory")
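
/* Usage sketch (assumed caller-side code, not part of this header): under
 * TSO the only hardware reordering left is a store followed by a load of a
 * different location, so rmb() and wmb() need only constrain the compiler,
 * while mb() must emit a real membar to order a publish-then-check pair:
 *
 *      WRITE_ONCE(my_flag, 1);
 *      mb();
 *      if (!READ_ONCE(other_flag))
 *              do_work();
 *
 * my_flag, other_flag and do_work() are hypothetical names, shown only to
 * illustrate the store-then-load shape that needs mb().
 */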

#define __smp_store_release(p, v)                                               \
do {                                                                    \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        WRITE_ONCE(*p, v);                                              \
} while (0)

#define __smp_load_acquire(p)                                           \
({                                                                      \
        typeof(*p) ___p1 = READ_ONCE(*p);                               \
        compiletime_assert_atomic_type(*p);                             \
        barrier();                                                      \
        ___p1;                                                          \
})
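
/* Usage sketch (assumed caller-side code): the smp_store_release() and
 * smp_load_acquire() wrappers from asm-generic/barrier.h map onto the
 * macros above on SMP builds, so a publish/consume pair costs only a
 * compiler barrier plus a plain access here:
 *
 *      producer:  msg->payload = compute_payload();
 *                 smp_store_release(&msg->ready, 1);
 *
 *      consumer:  if (smp_load_acquire(&msg->ready))
 *                         use_payload(msg->payload);
 *
 * msg, compute_payload() and use_payload() are hypothetical.
 */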

#define __smp_mb__before_atomic()       barrier()
#define __smp_mb__after_atomic()        barrier()
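
/* Usage sketch (assumed caller-side code): these sit next to
 * non-value-returning atomics when full ordering is needed, e.g.
 *
 *      atomic_inc(&obj->refs);
 *      smp_mb__after_atomic();
 *
 * Per the definitions above, both reduce to a compiler barrier on sparc64;
 * obj is a hypothetical object with an atomic_t refs member.
 */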

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */