linux/arch/mn10300/include/asm/spinlock.h
/* MN10300 spinlock support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H

#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/rwlock.h>
#include <asm/page.h>

/*
 * Simple spin lock operations.  There are two variants: one clears IRQs
 * on the local processor, the other does not.
 *
 * We make no fairness assumptions; fairness would have a cost.
 */

#define arch_spin_is_locked(x)	(*(volatile signed char *)(&(x)->slock) != 0)
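
/*
 * The lock word is a single byte: zero when unlocked, non-zero when
 * held.  A C-level sketch of the test above (illustrative only):
 *
 *	static inline int spin_is_locked_sketch(arch_spinlock_t *x)
 *	{
 *		return *(volatile signed char *)&x->slock != 0;
 *	}
 */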

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	asm volatile(
		"	bclr	1,(0,%0)	\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
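
/*
 * Unlock just clears the lock bit with "bclr"; the "memory" clobber
 * acts as a compiler barrier so critical-section accesses cannot be
 * reordered past the release.  Roughly equivalent C (sketch only):
 *
 *	barrier();
 *	lock->slock = 0;
 */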

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int ret;

	asm volatile(
		"	mov	1,%0		\n"
		"	bset	%0,(%1)		\n"
		"	bne	1f		\n"
		"	clr	%0		\n"
		"1:	xor	1,%0		\n"
		: "=d"(ret)
		: "a"(&lock->slock)
		: "memory", "cc");

	return ret;
}
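
/*
 * "bset" attempts to set the lock bit and sets the flags from the old
 * value; the final "xor" inverts the result so the function returns 1
 * when the lock was acquired and 0 when it was already held.
 * Hypothetical caller (sketch only):
 *
 *	if (arch_spin_trylock(&lock)) {
 *		... critical section ...
 *		arch_spin_unlock(&lock);
 *	}
 */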

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	asm volatile(
		"1:	bset	1,(0,%0)	\n"
		"	bne	1b		\n"
		:
		: "a"(&lock->slock)
		: "memory", "cc");
}
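
/*
 * The loop above retries "bset" until it finds the bit clear, i.e. a
 * test-and-set spin.  C-level sketch (test_and_set_bit_sketch() is a
 * hypothetical stand-in for the "bset" instruction):
 *
 *	while (test_and_set_bit_sketch(1, &lock->slock))
 *		;	// spin
 */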

static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
					 unsigned long flags)
{
	int temp;

	asm volatile(
		"1:	bset	1,(0,%2)	\n"
		"	beq	3f		\n"
		"	mov	%1,epsw		\n"
		"2:	mov	(0,%2),%0	\n"
		"	or	%0,%0		\n"
		"	bne	2b		\n"
		"	mov	%3,%0		\n"
		"	mov	%0,epsw		\n"
		"	nop			\n"
		"	nop			\n"
		"	bra	1b\n"
		"3:				\n"
		: "=&d" (temp)
		: "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
		: "memory", "cc");
}
#define arch_spin_lock_flags	arch_spin_lock_flags
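
/*
 * The flags variant spins with interrupts re-enabled: on contention it
 * restores the caller's EPSW (the saved IRQ flags), busy-reads the lock
 * word until it goes clear, then masks IRQs again (EPSW_IE with the CLI
 * level) before retrying the test-and-set.  C-level pseudocode (sketch
 * only; irq_restore_sketch()/irq_mask_sketch() are hypothetical):
 *
 *	while (test_and_set_bit_sketch(1, &lock->slock)) {
 *		irq_restore_sketch(flags);	// let IRQs in while waiting
 *		while (lock->slock)
 *			;			// wait for it to look free
 *		irq_mask_sketch();		// re-disable before retrying
 *	}
 */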

#ifdef __KERNEL__

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! It is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
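
/*
 * Hypothetical illustration of that mixing (my_rwlock is a made-up
 * lock; the locking calls are the generic kernel rwlock API):
 *
 *	// interrupt context: plain reader, no IRQ games needed
 *	read_lock(&my_rwlock);
 *	...
 *	read_unlock(&my_rwlock);
 *
 *	// process context: the writer must be irq-safe
 *	write_lock_irqsave(&my_rwlock, flags);
 *	...
 *	write_unlock_irqrestore(&my_rwlock, flags);
 */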

/*
 * On mn10300, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
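/*
 * With RW_LOCK_BIAS readers and writers share one counter; roughly:
 *
 *	RW_LOCK_BIAS		unlocked
 *	RW_LOCK_BIAS - n	held by n readers
 *	0			held by one writer
 *	negative		a waiter is backing off (transient)
 */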
static inline void arch_read_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_lock(rw, "__read_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (atomic_dec_return(count) < 0)
			atomic_inc(count);
	}
#endif
}
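
/*
 * In the loop above the decrement claims a reader slot; a negative
 * result means a writer holds (or is acquiring) the lock, so the
 * reader backs out its decrement and tries again.
 */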

static inline void arch_write_lock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_lock(rw, "__write_lock_failed");
#else
	{
		atomic_t *count = (atomic_t *)rw;
		while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
			atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}
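
/*
 * The writer subtracts the whole bias, which hits exactly zero only if
 * the counter was RW_LOCK_BIAS, i.e. no readers and no writer; on any
 * other value it restores the bias and retries.
 */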

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_read_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_inc(count);
	}
#endif
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
	__build_write_unlock(rw);
#else
	{
		atomic_t *count = (atomic_t *)rw;
		atomic_add(RW_LOCK_BIAS, count);
	}
#endif
}

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);
	return 0;
}
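
/*
 * The decrement and the test above are separate atomics, so another
 * CPU can update the counter in between; at worst this should cause a
 * spurious failure, which is acceptable behaviour for a trylock.
 */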

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;
	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}
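
/*
 * Same scheme as arch_write_lock(), but giving up after one attempt.
 * Hypothetical caller (sketch only):
 *
 *	if (arch_write_trylock(&rw)) {
 *		... exclusive section ...
 *		arch_write_unlock(&rw);
 *	}
 */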

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* __KERNEL__ */
#endif /* _ASM_SPINLOCK_H */