linux/arch/powerpc/include/asm/spinlock.h
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H
#ifdef __KERNEL__

/*
 * Simple spin lock operations.
 *
 * Copyright (C) 2001-2004 Paul Mackerras <paulus@au.ibm.com>, IBM
 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 * Copyright (C) 2002 Dave Engebretsen <engebret@us.ibm.com>, IBM
 *	Rework to support virtual processors
 *
 * An int is used for the lock word, as a full 64-bit word is not necessary.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
#include <linux/irqflags.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif
#include <asm/asm-compat.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#ifdef CONFIG_PPC64
/* use 0x800000yy when locked, where yy == CPU number */
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
#else
#define LOCK_TOKEN	1
#endif
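/*
 * Illustrative note (not part of the original header): the token stored
 * by a 64-bit kernel has 0x8000 in the upper half-word and the logical
 * CPU number in the lower half-word, so a lock held by CPU 5 reads as
 * 0x80000005.  The BE/LE distinction above only selects which of the
 * two adjacent u16 paca fields a 32-bit load must start at to produce
 * that value.  A hypothetical snippet recovering the holder:
 *
 *	holder_cpu = lock->slock & 0xffff;
 *
 * 32-bit kernels simply store 1, so no holder can be derived there.
 */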

#if defined(CONFIG_PPC64) && defined(CONFIG_SMP)
#define CLEAR_IO_SYNC	(get_paca()->io_sync = 0)
#define SYNC_IO		do {						\
				if (unlikely(get_paca()->io_sync)) {	\
					mb();				\
					get_paca()->io_sync = 0;	\
				}					\
			} while (0)
#else
#define CLEAR_IO_SYNC
#define SYNC_IO
#endif
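/*
 * Background on CLEAR_IO_SYNC/SYNC_IO above (descriptive note, not in
 * the original header): the MMIO accessors set paca->io_sync after a
 * device store, so if MMIO was performed inside the critical section,
 * unlock issues a full sync (mb()) rather than the lighter release
 * barrier, ensuring the device store is performed before the lock is
 * seen as free.  Taking the lock clears the flag, so only MMIO done
 * while the lock is held pays for the heavier barrier.
 */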

#ifdef CONFIG_PPC_PSERIES
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	return !!(be32_to_cpu(lppaca_of(cpu).yield_count) & 1);
}
#endif
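/*
 * Descriptive note (not in the original header): the hypervisor bumps
 * the lppaca yield_count when it preempts a virtual CPU and again when
 * it dispatches it, so an odd count means the vCPU is not currently
 * running.  Generic locking code uses vcpu_is_preempted() to stop
 * optimistically spinning on a lock whose holder cannot make progress.
 */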

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.slock == 0;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	smp_mb();
	return !arch_spin_value_unlocked(*lock);
}

/*
 * This returns the old value in the lock, so we succeeded
 * in getting the lock if the return value is 0.
 */
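/*
 * Descriptive note on the sequence below (not in the original header):
 * lwarx loads the lock word and sets a reservation; the EH=1 hint tells
 * the CPU the reservation is being used to acquire a lock.  cmpwi/bne-
 * bails out with a non-zero result if the lock is already held, and
 * stwcx. stores the token only if the reservation is still intact,
 * retrying when it was lost.  PPC_ACQUIRE_BARRIER then provides acquire
 * ordering for the critical section (isync, patched to lwsync on CPUs
 * where that is preferable).
 */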
static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, token;

	token = LOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n\
	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"
	: "=&r" (tmp)
	: "r" (token), "r" (&lock->slock)
	: "cr0", "memory");

	return tmp;
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	return __arch_spin_trylock(lock) == 0;
}

/*
 * On a system with shared processors (that is, where a physical
 * processor is multiplexed between several virtual processors),
 * there is no point spinning on a lock if the holder of the lock
 * isn't currently scheduled on a physical processor.  Instead
 * we detect this situation and ask the hypervisor to give the
 * rest of our timeslice to the lock holder.
 *
 * So that we can tell which virtual processor is holding a lock,
 * we put 0x80000000 | smp_processor_id() in the lock when it is
 * held.  Conveniently, we have a word in the paca that holds this
 * value.
 */

#if defined(CONFIG_PPC_SPLPAR)
/* We only yield to the hypervisor if we are in shared processor mode */
#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
extern void __spin_yield(arch_spinlock_t *lock);
extern void __rw_yield(arch_rwlock_t *lock);
#else /* SPLPAR */
#define __spin_yield(x)	barrier()
#define __rw_yield(x)	barrier()
#define SHARED_PROCESSOR	0
#endif

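/*
 * Descriptive note on the spin loops below (not in the original
 * header): __spin_yield()/__rw_yield() live in arch/powerpc/lib/locks.c;
 * they read the holder CPU out of the lock word and, if that vCPU is
 * preempted, confer the remaining timeslice to it via the H_CONFER
 * hcall.  HMT_low()/HMT_medium() lower and restore the SMT hardware
 * thread priority so a spinning thread steals fewer cycles from its
 * siblings.
 */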
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
	}
}

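/*
 * Descriptive note (not in the original header): the _flags variant
 * below re-enables interrupts to the caller's saved state while it is
 * busy-waiting, so interrupts are not kept disabled for the whole spin;
 * they are disabled again (flags_dis) before the trylock is retried.
 */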
static inline
void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
{
	unsigned long flags_dis;

	CLEAR_IO_SYNC;
	while (1) {
		if (likely(__arch_spin_trylock(lock) == 0))
			break;
		local_save_flags(flags_dis);
		local_irq_restore(flags);
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__spin_yield(lock);
		} while (unlikely(lock->slock != 0));
		HMT_medium();
		local_irq_restore(flags_dis);
	}
}
#define arch_spin_lock_flags arch_spin_lock_flags

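/*
 * Descriptive note (not in the original header): unlock needs only
 * release semantics.  PPC_RELEASE_BARRIER (lwsync on most CPUs) orders
 * every access in the critical section before the plain store of 0,
 * and SYNC_IO upgrades this to a full sync when MMIO was performed
 * while the lock was held.
 */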
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	SYNC_IO;
	__asm__ __volatile__("# arch_spin_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	lock->slock = 0;
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get a
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

#ifdef CONFIG_PPC64
#define __DO_SIGN_EXTEND	"extsw	%0,%0\n"
#define WRLOCK_TOKEN		LOCK_TOKEN	/* it's negative */
#else
#define __DO_SIGN_EXTEND
#define WRLOCK_TOKEN		(-1)
#endif

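/*
 * Descriptive note (not in the original header): the rwlock word is 0
 * when free, a positive reader count while read-held, and negative
 * while write-held (the 32-bit writer token is -1; the 64-bit one is
 * 0x800000yy, which is negative once sign-extended).  On PPC64 lwarx
 * zero-extends the 32-bit word into a 64-bit register, so
 * __DO_SIGN_EXTEND restores the sign before the signed comparisons in
 * the trylock sequences below.
 */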
/*
 * This returns the old value in the lock + 1,
 * so we got a read lock if the return value is > 0.
 */
static inline long __arch_read_trylock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%1,1) "\n"
	__DO_SIGN_EXTEND
"	addic.		%0,%0,1\n\
	ble-		2f\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (&rw->lock)
	: "cr0", "xer", "memory");

	return tmp;
}
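/*
 * Descriptive note (not in the original header): addic. is used above
 * because plain addi has no record form; as a side effect it also
 * alters the carry bit, which is why XER is in the clobber list.
 */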

/*
 * This returns the old value in the lock,
 * so we got the write lock if the return value is 0.
 */
static inline long __arch_write_trylock(arch_rwlock_t *rw)
{
	long tmp, token;

	token = WRLOCK_TOKEN;
	__asm__ __volatile__(
"1:	" PPC_LWARX(%0,0,%2,1) "\n\
	cmpwi		0,%0,0\n\
	bne-		2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.		%1,0,%2\n\
	bne-		1b\n"
	PPC_ACQUIRE_BARRIER
"2:"	: "=&r" (tmp)
	: "r" (token), "r" (&rw->lock)
	: "cr0", "memory");

	return tmp;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_read_trylock(rw) > 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock < 0));
		HMT_medium();
	}
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	while (1) {
		if (likely(__arch_write_trylock(rw) == 0))
			break;
		do {
			HMT_low();
			if (SHARED_PROCESSOR)
				__rw_yield(rw);
		} while (unlikely(rw->lock != 0));
		HMT_medium();
	}
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	return __arch_read_trylock(rw) > 0;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	return __arch_write_trylock(rw) == 0;
}

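/*
 * Descriptive note (not in the original header): read_unlock must use a
 * larx/stcx. loop to decrement the reader count atomically, since other
 * readers may be taking or dropping the lock at the same time, whereas
 * write_unlock owns the word exclusively and can simply store 0 after
 * the release barrier.
 */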
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	long tmp;

	__asm__ __volatile__(
	"# read_unlock\n\t"
	PPC_RELEASE_BARRIER
"1:	lwarx		%0,0,%1\n\
	addic		%0,%0,-1\n"
	PPC405_ERR77(0,%1)
"	stwcx.		%0,0,%1\n\
	bne-		1b"
	: "=&r"(tmp)
	: "r"(&rw->lock)
	: "cr0", "xer", "memory");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__("# write_unlock\n\t"
				PPC_RELEASE_BARRIER: : :"memory");
	rw->lock = 0;
}

#define arch_spin_relax(lock)	__spin_yield(lock)
#define arch_read_relax(lock)	__rw_yield(lock)
#define arch_write_relax(lock)	__rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock()   smp_mb()

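/*
 * Illustrative usage sketch (not part of the original header): these
 * arch_* primitives back the generic spin_lock()/spin_unlock() and
 * read_lock()/write_lock() APIs; kernel code uses those wrappers rather
 * than calling the arch_* routines directly, e.g.:
 *
 *	static DEFINE_SPINLOCK(my_lock);	// hypothetical lock
 *
 *	spin_lock(&my_lock);
 *	// ... critical section ...
 *	spin_unlock(&my_lock);
 */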
#endif /* __KERNEL__ */
#endif /* __ASM_SPINLOCK_H */