/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H

#include <asm/asm-const.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory). The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")

/* The sub-arch has lwsync */
#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")

/* Order CPU accesses to coherent (cacheable) memory shared with DMA devices. */
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known. For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})
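
/*
 * Illustrative sketch, not part of this header: a typical use of the
 * acquire/release pair defined above, via the smp_store_release() and
 * smp_load_acquire() wrappers that asm-generic/barrier.h builds from
 * them. The names (flag, data, writer, reader) are hypothetical. The
 * release's lwsync orders the data store before the flag store; the
 * acquire's lwsync orders the flag load before the data load, so a
 * reader that observes flag == 1 also observes data == 42.
 *
 *	static int data;
 *	static int flag;
 *
 *	static void writer(void)
 *	{
 *		data = 42;
 *		smp_store_release(&flag, 1);	// lwsync, then store
 *	}
 *
 *	static int reader(void)
 *	{
 *		if (smp_load_acquire(&flag))	// load, then lwsync
 *			return data;		// sees 42 if flag was seen as 1
 *		return -1;
 *	}
 */
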
#ifdef CONFIG_PPC_BOOK3S_64
#define NOSPEC_BARRIER_SLOT   nop
#elif defined(CONFIG_PPC_FSL_BOOK3E)
#define NOSPEC_BARRIER_SLOT   nop; nop
#endif

#ifdef CONFIG_PPC_BARRIER_NOSPEC
/*
 * Prevent execution of subsequent instructions until preceding branches have
 * been fully resolved and are no longer executing speculatively.
 */
#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT

/* This also acts as a compiler barrier due to the memory clobber. */
#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory")

#else /* !CONFIG_PPC_BARRIER_NOSPEC */
#define barrier_nospec_asm
#define barrier_nospec()
#endif /* CONFIG_PPC_BARRIER_NOSPEC */

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */
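
/*
 * Illustrative sketch, not part of this header: the classic
 * producer/consumer pairing of smp_wmb()/smp_rmb() described in the
 * comment at the top of this file. The names (buf, ready, producer,
 * consumer) are hypothetical. smp_wmb() (lwsync, or eieio on sub-arches
 * without lwsync) keeps the buf store ahead of the ready store;
 * smp_rmb() (lwsync) keeps the ready load ahead of the buf load.
 *
 *	static int buf;
 *	static int ready;
 *
 *	static void producer(void)
 *	{
 *		buf = 42;
 *		smp_wmb();		// order buf store before ready store
 *		WRITE_ONCE(ready, 1);
 *	}
 *
 *	static int consumer(void)
 *	{
 *		if (READ_ONCE(ready)) {
 *			smp_rmb();	// order ready load before buf load
 *			return buf;	// sees 42 if ready was seen as 1
 *		}
 *		return -1;
 *	}
 */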