linux/arch/powerpc/include/asm/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * The lock word is an int; a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/iseries/hv_call.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>

#define __raw_spin_is_locked(x)         ((x)->slock != 0)

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      1
#endif
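
/*
 * Illustration: LOCK_TOKEN reads the u16 lock_token (the constant
 * 0x8000, see asm/paca.h) together with the adjacent u16 paca_index as
 * one 32-bit value, so the word stored in a held lock is
 * 0x80000000 | cpu; e.g. CPU 3 stores 0x80000003.  The yield code
 * below uses this to find out which virtual CPU holds the lock.
 */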

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC   (get_paca()->io_sync = 0)
#define SYNC_IO         do {                                            \
                                if (unlikely(get_paca()->io_sync)) {    \
                                        mb();                           \
                                        get_paca()->io_sync = 0;        \
                                }                                       \
                        } while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
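
/*
 * The io_sync flag in the paca is set by the MMIO write accessors; if
 * any MMIO was done while the lock was held, SYNC_IO makes the unlock
 * path issue a full mb() so the device write is performed before the
 * lock is seen as free.  CLEAR_IO_SYNC resets the flag at lock time so
 * critical sections that do no I/O pay no extra barrier on unlock.
 */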

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        return arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
extern void __spin_yield(raw_spinlock_t *lock);
extern void __rw_yield(raw_rwlock_t *lock);
#else /* SPLPAR || ISERIES */
#define __spin_yield(x) barrier()
#define __rw_yield(x)   barrier()
#define SHARED_PROCESSOR        0
#endif
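
/*
 * Rough sketch of __spin_yield() (the real implementation is in
 * arch/powerpc/lib/locks.c; this is simplified and the hypervisor call
 * shown is the pSeries variant, for illustration only):
 *
 *	void __spin_yield(raw_spinlock_t *lock)
 *	{
 *		unsigned int val = lock->slock;
 *		unsigned int holder_cpu, yield_count;
 *
 *		if (val == 0)
 *			return;				// already free
 *		holder_cpu = val & 0xffff;		// from LOCK_TOKEN
 *		yield_count = lppaca[holder_cpu].yield_count;
 *		if ((yield_count & 1) == 0)
 *			return;				// holder is running
 *		rmb();
 *		if (lock->slock != val)
 *			return;				// lock changed hands
 *		// give the rest of our timeslice to the holder
 *		plpar_hcall_norets(H_CONFER,
 *			get_hard_smp_processor_id(holder_cpu), yield_count);
 *	}
 */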

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        while (1) {
                if (likely(arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}

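/*
 * Like __raw_spin_lock(), but while we spin waiting for the lock we
 * restore the interrupt state the caller passed in (which typically
 * has interrupts enabled), so pending interrupts are not held off for
 * the whole wait.  Interrupts are disabled again before each retry.
 */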
static inline
void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        CLEAR_IO_SYNC;
        while (1) {
                if (likely(arch_spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}

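/*
 * Unlock is a plain store of 0 preceded by a release barrier: lwsync
 * (LWSYNC_ON_SMP) orders all prior loads and stores in the critical
 * section before the store that frees the lock, and SYNC_IO upgrades
 * this to a full sync when MMIO was done while the lock was held.
 */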
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
        SYNC_IO;
        __asm__ __volatile__("# __raw_spin_unlock\n\t"
                                LWSYNC_ON_SMP: : :"memory");
        lock->slock = 0;
}

#ifdef CONFIG_PPC64
extern void __raw_spin_unlock_wait(raw_spinlock_t *lock);
#else
#define __raw_spin_unlock_wait(lock) \
        do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
#endif

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
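
/*
 * For example (illustrative only; the lock name below is hypothetical,
 * not part of this header):
 *
 *	DEFINE_RWLOCK(my_rwlock);
 *
 *	// writer in process context must lock out interrupt readers
 *	write_lock_irq(&my_rwlock);
 *	... update shared data ...
 *	write_unlock_irq(&my_rwlock);
 *
 *	// a reader, even in an interrupt handler, can take the plain
 *	// non-irq read lock
 *	read_lock(&my_rwlock);
 *	... read shared data ...
 *	read_unlock(&my_rwlock);
 */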

#define __raw_read_can_lock(rw)         ((rw)->lock >= 0)
#define __raw_write_can_lock(rw)        (!(rw)->lock)

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif
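
/*
 * The rwlock word thus encodes: 0 = unlocked, N > 0 = held by N
 * readers, negative = write-locked.  WRLOCK_TOKEN is -1 on 32-bit; on
 * 64-bit it is the 0x800000yy CPU token, which is negative once the
 * word is sign-extended.
 */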

/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long arch_read_trylock(raw_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     lwarx           %0,0,%1\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long arch_write_trylock(raw_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%2)
"       stwcx.          %1,0,%2\n\
        bne-            1b\n\
        isync\n\
2:"     : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
        while (1) {
                if (likely(arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
        while (1) {
                if (likely(arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int __raw_read_trylock(raw_rwlock_t *rw)
{
        return arch_read_trylock(rw) > 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
        return arch_write_trylock(rw) == 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        LWSYNC_ON_SMP
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                LWSYNC_ON_SMP: : :"memory");
        rw->lock = 0;
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)

#define _raw_spin_relax(lock)   __spin_yield(lock)
#define _raw_read_relax(lock)   __rw_yield(lock)
#define _raw_write_relax(lock)  __rw_yield(lock)

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */