linux/arch/s390/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/spinlock.h"
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <linux/smp.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/alternative.h>

#define SPINLOCK_LOCKVAL (S390_lowcore.spinlock_lockval)

extern int spin_retry;

#ifndef CONFIG_SMP
static inline bool arch_vcpu_is_preempted(int cpu) { return false; }
#else
bool arch_vcpu_is_preempted(int cpu);
#endif

#define vcpu_is_preempted arch_vcpu_is_preempted

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions; fairness has a cost.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
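/*
 * Illustrative usage sketch (not part of the original header): normal
 * kernel code does not call the arch_spin_* hooks below directly, it uses
 * the generic spinlock API, which on s390 ends up in arch_spin_lock() /
 * arch_spin_unlock().  The struct and function names in this example are
 * invented for illustration only.
 *
 *      #include <linux/spinlock.h>
 *
 *      struct example_counter {
 *              spinlock_t lock;
 *              unsigned long value;
 *      };
 *
 *      static void example_counter_inc(struct example_counter *c)
 *      {
 *              // arch_spin_lock(): trylock fast path, else arch_spin_lock_wait()
 *              spin_lock(&c->lock);
 *              c->value++;
 *              spin_unlock(&c->lock);  // arch_spin_unlock()
 *      }
 */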

void arch_spin_relax(arch_spinlock_t *lock);
#define arch_spin_relax arch_spin_relax

void arch_spin_lock_wait(arch_spinlock_t *);
int arch_spin_trylock_retry(arch_spinlock_t *);
void arch_spin_lock_setup(int cpu);

static inline u32 arch_spin_lockval(int cpu)
{
        /* 0 means unlocked, so encode the owning CPU as cpu + 1 */
        return cpu + 1;
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.lock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
        return READ_ONCE(lp->lock) != 0;
}

static inline int arch_spin_trylock_once(arch_spinlock_t *lp)
{
        barrier();
        /* Fast path: swing a free lock from 0 to this CPU's lockval */
        return likely(__atomic_cmpxchg_bool(&lp->lock, 0, SPINLOCK_LOCKVAL));
}

static inline void arch_spin_lock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp,
                                        unsigned long flags)
{
        /* The saved interrupt flags are not used while waiting */
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait(lp);
}
#define arch_spin_lock_flags    arch_spin_lock_flags

static inline int arch_spin_trylock(arch_spinlock_t *lp)
{
        if (!arch_spin_trylock_once(lp))
                return arch_spin_trylock_retry(lp);
        return 1;
}

static inline void arch_spin_unlock(arch_spinlock_t *lp)
{
        typecheck(int, lp->lock);
        /*
         * Release the lock by storing a zero halfword over the lower
         * (owner) half of the lock word; the upper halfword is left
         * untouched.  The NIAI 7 access-intent hint is patched in at
         * runtime when facility 49 is installed.
         */
        asm volatile(
                ALTERNATIVE("", ".long 0xb2fa0070", 49) /* NIAI 7 */
                "       sth     %1,%0\n"
                : "=Q" (((unsigned short *) &lp->lock)[1])
                : "d" (0) : "cc", "memory");
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
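/*
 * Illustrative sketch of the "mixed" pattern described above (not part of
 * the original header); the lock, array and function names are invented
 * for the example.  Readers running in interrupt context can take a plain
 * read_lock(), while the sole writer, running in process context, must be
 * irq-safe so that it cannot be interrupted by a reader on its own CPU
 * while it holds the write lock:
 *
 *      #include <linux/spinlock.h>
 *
 *      static DEFINE_RWLOCK(example_table_lock);
 *      static int example_table[16];
 *
 *      // interrupt context: reader only, no need to disable interrupts
 *      static int example_lookup(int idx)
 *      {
 *              int val;
 *
 *              read_lock(&example_table_lock);
 *              val = example_table[idx];
 *              read_unlock(&example_table_lock);
 *              return val;
 *      }
 *
 *      // process context: the only writer, must be irq-safe
 *      static void example_update(int idx, int val)
 *      {
 *              unsigned long flags;
 *
 *              write_lock_irqsave(&example_table_lock, flags);
 *              example_table[idx] = val;
 *              write_unlock_irqrestore(&example_table_lock, flags);
 *      }
 *
 * As the inline helpers below suggest, rw->cnts keeps the reader count in
 * the lower halfword (each reader adds 1) and writer state in the upper
 * halfword, which is why arch_read_lock() takes the slow path whenever
 * any bit of 0xffff0000 is set.
 */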

#define arch_read_relax(rw) barrier()
#define arch_write_relax(rw) barrier()

void arch_read_lock_wait(arch_rwlock_t *lp);
void arch_write_lock_wait(arch_rwlock_t *lp);

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        int old;

        old = __atomic_add(1, &rw->cnts);
        /* Any bit set in the upper halfword means a writer is involved */
        if (old & 0xffff0000)
                arch_read_lock_wait(rw);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        __atomic_add_const_barrier(-1, &rw->cnts);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        /* Go from "no readers, no writer" straight to write-locked */
        if (!__atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000))
                arch_write_lock_wait(rw);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __atomic_add_barrier(-0x30000, &rw->cnts);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return (!(old & 0xffff0000) &&
                __atomic_cmpxchg_bool(&rw->cnts, old, old + 1));
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int old;

        old = READ_ONCE(rw->cnts);
        return !old && __atomic_cmpxchg_bool(&rw->cnts, 0, 0x30000);
}

#endif /* __ASM_SPINLOCK_H */