linux/arch/powerpc/include/asm/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *      Rework to support virtual processors
 *
 * An int-sized lock word is used, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-405.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
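/*
 * lock_token (the constant 0x8000) and paca_index are adjacent 16-bit
 * fields in the paca, laid out in endian-dependent order so that a
 * 32-bit load at the field chosen here reads 0x8000 in the upper
 * halfword and this CPU's paca index in the lower halfword.
 */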
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN      1
#endif

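/*
 * The 64-bit MMIO accessors set paca->io_sync when they touch I/O space.
 * CLEAR_IO_SYNC at lock time discards a flag left over from before the
 * critical section; SYNC_IO at unlock time issues a full barrier so that
 * MMIO done while the lock was held is ordered before the lock release.
 */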
#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC   (get_paca()->io_sync = 0)
#define SYNC_IO         do {                                            \
                                if (unlikely(get_paca()->io_sync)) {    \
                                        mb();                           \
                                        get_paca()->io_sync = 0;        \
                                }                                       \
                        } while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
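/*
 * The hypervisor bumps the lppaca yield_count on every preemption and
 * dispatch of the virtual processor, so an odd value means the vCPU is
 * not currently running on a physical processor.
 */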
static inline bool vcpu_is_preempted(int cpu)
{
        if (!firmware_has_feature(FW_FEATURE_SPLPAR))
                return false;
        return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.slock == 0;
}

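/*
 * The full barrier orders this CPU's prior stores against the load of
 * the lock word, which store-then-check-lock callers rely on.
 */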
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        smp_mb();
        return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long tmp, token;

        token = LOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"
        : "=&r" (tmp)
        : "r" (token), "r" (&lock->slock)
        : "cr0", "memory");

        return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

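/*
 * __spin_yield()/__rw_yield() (in lib/locks.c) read the holder's CPU
 * number out of the lock word and, if that virtual processor is
 * currently preempted, use the H_CONFER hcall to donate the rest of
 * our timeslice to it.
 */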
#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x) barrier()
#define __rw_yield(x)   barrier()
#define SHARED_PROCESSOR        0
#endif

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        CLEAR_IO_SYNC;
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
        }
}

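/*
 * Like arch_spin_lock(), but while busy-waiting we restore the caller's
 * saved interrupt state (flags) so interrupts are not kept disabled for
 * the whole wait, then disable them again before retrying the acquire.
 */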
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
        unsigned long flags_dis;

        CLEAR_IO_SYNC;
        while (1) {
                if (likely(__arch_spin_trylock(lock) == 0))
                        break;
                local_save_flags(flags_dis);
                local_irq_restore(flags);
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __spin_yield(lock);
                } while (unlikely(lock->slock != 0));
                HMT_medium();
                local_irq_restore(flags_dis);
        }
}
#define arch_spin_lock_flags arch_spin_lock_flags

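/*
 * Unlock is a release: SYNC_IO orders any MMIO done while the lock was
 * held, the release barrier orders the critical section before the
 * store, and the lock word is then cleared with a plain store.
 */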
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        SYNC_IO;
        __asm__ __volatile__("# arch_spin_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND        "extsw  %0,%0\n"
#define WRLOCK_TOKEN            LOCK_TOKEN      /* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN            (-1)
#endif

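/*
 * rw->lock holds a signed count: positive while readers hold the lock,
 * zero when free, and negative (WRLOCK_TOKEN, sign-extended on 64-bit)
 * while a writer holds it.
 */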
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%1,1) "\n"
        __DO_SIGN_EXTEND
"       addic.          %0,%0,1\n\
        ble-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (&rw->lock)
        : "cr0", "xer", "memory");

        return tmp;
}

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
        long tmp, token;

        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
"1:     " PPC_LWARX(%0,0,%2,1) "\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %1,0,%2\n\
        bne-            1b\n"
        PPC_ACQUIRE_BARRIER
"2:"    : "=&r" (tmp)
        : "r" (token), "r" (&rw->lock)
        : "cr0", "memory");

        return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_read_trylock(rw) > 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock < 0));
                HMT_medium();
        }
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (1) {
                if (likely(__arch_write_trylock(rw) == 0))
                        break;
                do {
                        HMT_low();
                        if (SHARED_PROCESSOR)
                                __rw_yield(rw);
                } while (unlikely(rw->lock != 0));
                HMT_medium();
        }
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __arch_write_trylock(rw) == 0;
}

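/*
 * Drop a read lock: atomically decrement the reader count; the release
 * barrier orders the critical section before the decrement.
 */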
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        long tmp;

        __asm__ __volatile__(
        "# read_unlock\n\t"
        PPC_RELEASE_BARRIER
"1:     lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.          %0,0,%1\n\
        bne-            1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __asm__ __volatile__("# write_unlock\n\t"
                                PPC_RELEASE_BARRIER: : :"memory");
        rw->lock = 0;
}

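/*
 * Relax hooks used by the generic lock code while waiting: on
 * shared-processor LPARs these donate the remaining timeslice to the
 * lock holder; otherwise they are plain compiler barriers.
 */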
#define arch_spin_relax(lock)   __spin_yield(lock)
#define arch_read_relax(lock)   __rw_yield(lock)
#define arch_write_relax(lock)  __rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */