linux/arch/sh/include/asm/spinlock-cas.h
/*
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)
                : "r"(old), "z"(p)
                : "t", "memory" );
        return new;
}
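
/*
 * For reference, a plain-C sketch of the semantics the cas.l instruction
 * above provides (illustrative only; the real operation is a single atomic
 * instruction, and the "z" constraint places the pointer in r0, which the
 * instruction uses as its address operand):
 *
 *        unsigned observed = *p;
 *        if (observed == old)
 *                *p = new;
 *        return observed;        // caller succeeded iff observed == old
 */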

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)          ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __sl_cas(&lock->lock, 1, 0);
}
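
/*
 * Lock word convention used by the routines above: 1 means unlocked (the
 * value arch_spin_lock() expects to CAS away), 0 means held.  A minimal
 * usage sketch, driving the primitives directly for illustration only;
 * real callers go through the generic spin_lock()/spin_unlock() wrappers,
 * and __ARCH_SPIN_LOCK_UNLOCKED is assumed to come from the arch's
 * spinlock_types.h:
 *
 *        arch_spinlock_t l = __ARCH_SPIN_LOCK_UNLOCKED;  // word starts at 1
 *
 *        arch_spin_lock(&l);      // spins until the CAS 1 -> 0 succeeds
 *        // ...critical section...
 *        arch_spin_unlock(&l);    // CAS 0 -> 1 releases the lock
 *
 *        if (arch_spin_trylock(&l))  // nonzero return means the lock was taken
 *                arch_spin_unlock(&l);
 */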

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
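
/*
 * Sketch of the mixing described above, using the generic rwlock API
 * rather than the arch_* routines directly (DEFINE_RWLOCK and the
 * *_irqsave variants come from the generic locking headers):
 *
 *        static DEFINE_RWLOCK(map_lock);
 *
 *        // reader, safe to run from interrupt context
 *        read_lock(&map_lock);
 *        ...
 *        read_unlock(&map_lock);
 *
 *        // writer, keeps local interrupts off while holding the lock
 *        unsigned long flags;
 *        write_lock_irqsave(&map_lock, flags);
 *        ...
 *        write_unlock_irqrestore(&map_lock, flags);
 */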

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)   ((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)  ((x)->lock == RW_LOCK_BIAS)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (old && __sl_cas(&rw->lock, old, old-1) != old);
        return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}
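
/*
 * Counting scheme the rwlock routines above rely on (RW_LOCK_BIAS is the
 * value an unlocked rwlock is initialized to, assumed to be defined in the
 * arch's spinlock_types.h):
 *
 *        rw->lock == RW_LOCK_BIAS        no readers and no writer
 *        rw->lock == RW_LOCK_BIAS - n    n readers, no writer
 *        rw->lock == 0                   a writer holds the lock
 *
 * Readers CAS the count down by one and spin while it is zero; a writer
 * CASes the full bias down to zero, so it only succeeds when no readers or
 * other writers are present, and arch_write_unlock() restores the bias.
 */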

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SH_SPINLOCK_CAS_H */