linux/arch/powerpc/include/asm/simple_spinlock.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_H
#define _ASM_POWERPC_SIMPLE_SPINLOCK_H

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * A plain int is used for the lock word since a full 64-bit word
 * is not necessary.
 *
 * (the type definitions are in asm/simple_spinlock_types.h)
 */
#include <linux/irqflags.h>
#include <asm/paravirt.h>
#include <asm/paca.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN      1
#endif
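
/*
 * Illustrative sketch, not part of the upstream header: on 64-bit, the
 * paca is assumed to lay out the constant 0x8000 halfword and the 16-bit
 * CPU index so that the 32-bit LOCK_TOKEN load yields 0x800000yy on both
 * endiannesses, e.g. CPU 5 stores 0x80000005 when it takes a lock.  The
 * hypothetical helper below shows how the holder could be recovered from
 * a held lock word under that 64-bit encoding (on 32-bit the token is
 * just 1, so there is no holder information to recover).
 */
static inline unsigned int example_spin_lock_holder(arch_spinlock_t *lock)
{
        unsigned int val = lock->slock;

        /* 0 means unlocked; otherwise the low 16 bits hold the CPU number */
        return val ? (val & 0xffff) : ~0U;
}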

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        smp_mb();
        return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __arch_spin_trylock(lock) == 0;
}
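
/*
 * A minimal illustrative caller, not part of the upstream header: since
 * __arch_spin_trylock() returns the old lock word, 0 means the lock was
 * free and has just been taken.  The hypothetical helper below bounds
 * the number of attempts, something the real API does not provide, and
 * only uses symbols already visible in this file.
 */
static inline int example_spin_trylock_n(arch_spinlock_t *lock, int tries)
{
        int got = 0;

        while (tries--) {
                if (__arch_spin_trylock(lock) == 0) {
                        got = 1;        /* old value was 0: lock acquired */
                        break;
                }
                HMT_low();              /* lower SMT priority while waiting */
        }
        HMT_medium();                   /* restore normal SMT priority */
        return got;
}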

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline void spin_yield(arch_spinlock_t *lock)
{
        if (is_shared_processor())
                splpar_spin_yield(lock);
        else
                barrier();
}

static inline void rw_yield(arch_rwlock_t *lock)
{
        if (is_shared_processor())
                splpar_rw_yield(lock);
        else
                barrier();
}
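
/*
 * A rough, simplified sketch, not the actual implementation: the real
 * splpar_spin_yield() lives out of line (in lib/locks.c) and is assumed
 * here to recover the holder's CPU number from the low 16 bits of the
 * lock word and, if that virtual CPU is currently preempted, ask the
 * hypervisor to run it instead of us.  yield_count_of() and
 * yield_to_preempted() are taken from asm/paravirt.h, with an odd yield
 * count assumed to mean the holder's vCPU is preempted.
 */
static inline void example_splpar_spin_yield(arch_spinlock_t *lock)
{
        unsigned int lock_value, holder_cpu, yield_count;

        lock_value = lock->slock;
        if (lock_value == 0)
                return;                         /* lock has been released */
        holder_cpu = lock_value & 0xffff;       /* yy from 0x800000yy */
        yield_count = yield_count_of(holder_cpu);
        if ((yield_count & 1) == 0)
                return;                         /* holder is running now */
        rmb();
        if (lock->slock != lock_value)
                return;                         /* ownership has changed */
        yield_to_preempted(holder_cpu, yield_count);
}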

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}

static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}
#define arch_spin_lock_flags arch_spin_lock_flags

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.  (An illustrative sketch of this pattern
 * follows arch_write_unlock() below.)
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
"       stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
"       stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}
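
/*
 * A small hypothetical helper, not part of the upstream API, summarising
 * the lock word encoding the two trylock routines above rely on: 0 means
 * the lock is free, a positive value is the number of readers, and a
 * negative value (the sign-extended WRLOCK_TOKEN) means a writer holds it.
 */
static inline long example_rwlock_readers(arch_rwlock_t *rw)
{
        long val = (int)rw->lock;       /* sign-extend the 32-bit lock word */

        if (val < 0)
                return -1;              /* write-locked */
        return val;                     /* 0 if free, else number of readers */
}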

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (is_shared_processor())
                                splpar_rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __arch_write_trylock(rw) == 0;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
}
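
/*
 * Illustrative sketch of the reader-in-interrupt pattern described in
 * the comment above the read-write lock section; the two hypothetical
 * helpers below are not upstream code.  Readers may take the lock
 * without disabling interrupts, but every writer must disable them
 * first, otherwise a reader running in an interrupt on the writer's own
 * CPU would spin forever.  local_irq_save()/local_irq_restore() come
 * from linux/irqflags.h, included at the top of this file.
 */
static inline void example_writer_side(arch_rwlock_t *rw, int *shared, int val)
{
        unsigned long flags;

        local_irq_save(flags);          /* writers must exclude local irqs */
        arch_write_lock(rw);
        *shared = val;
        arch_write_unlock(rw);
        local_irq_restore(flags);
}

static inline int example_reader_side(arch_rwlock_t *rw, const int *shared)
{
        int val;

        /* safe even from interrupt context: readers need not disable irqs */
        arch_read_lock(rw);
        val = *shared;
        arch_read_unlock(rw);
        return val;
}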

#define arch_spin_relax(lock)   spin_yield(lock)
#define arch_read_relax(lock)   rw_yield(lock)
#define arch_write_relax(lock)  rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* _ASM_POWERPC_SIMPLE_SPINLOCK_H */