/* arch/tile/include/asm/spinlock_64.h */
/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * 64-bit SMP ticket spinlocks, allowing only a single CPU anywhere
 * (the type definitions are in asm/spinlock_types.h)
 */
  17
#ifndef _ASM_TILE_SPINLOCK_64_H
#define _ASM_TILE_SPINLOCK_64_H

#include <linux/compiler.h>

/*
 * Shifts and masks for the various fields in "lock".
 * Bits 0-14 hold the "next" ticket to hand out, bit 15 is the
 * overflow bit for the "next" field, and bits 17 and up hold the
 * "current" (owner) ticket.
 */
#define __ARCH_SPIN_CURRENT_SHIFT	17
#define __ARCH_SPIN_NEXT_MASK		0x7fff
#define __ARCH_SPIN_NEXT_OVERFLOW	0x8000
  27
  28/*
  29 * Return the "current" portion of a ticket lock value,
  30 * i.e. the number that currently owns the lock.
  31 */
  32static inline u32 arch_spin_current(u32 val)
  33{
  34        return val >> __ARCH_SPIN_CURRENT_SHIFT;
  35}
  36
  37/*
  38 * Return the "next" portion of a ticket lock value,
  39 * i.e. the number that the next task to try to acquire the lock will get.
  40 */
  41static inline u32 arch_spin_next(u32 val)
  42{
  43        return val & __ARCH_SPIN_NEXT_MASK;
  44}
  45
  46/* The lock is locked if a task would have to wait to get it. */
  47static inline int arch_spin_is_locked(arch_spinlock_t *lock)
  48{
  49        /* Use READ_ONCE() to ensure that calling this in a loop is OK. */
  50        u32 val = READ_ONCE(lock->lock);
  51        return arch_spin_current(val) != arch_spin_next(val);
  52}
  53
/* Bump the current ticket so the next task owns the lock. */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/*
	 * The barrier must precede the fetchadd: releasing the lock
	 * publishes every store made inside the critical section.
	 */
	wmb();  /* guarantee anything modified under the lock is visible */
	__insn_fetchadd4(&lock->lock, 1U << __ARCH_SPIN_CURRENT_SHIFT);
}
  60
  61void arch_spin_lock_slow(arch_spinlock_t *lock, u32 val);
  62
  63/* Grab the "next" ticket number and bump it atomically.
  64 * If the current ticket is not ours, go to the slow path.
  65 * We also take the slow path if the "next" value overflows.
  66 */
  67static inline void arch_spin_lock(arch_spinlock_t *lock)
  68{
  69        u32 val = __insn_fetchadd4(&lock->lock, 1);
  70        u32 ticket = val & (__ARCH_SPIN_NEXT_MASK | __ARCH_SPIN_NEXT_OVERFLOW);
  71        if (unlikely(arch_spin_current(val) != ticket))
  72                arch_spin_lock_slow(lock, ticket);
  73}
  74
/* Try to get the lock, and return whether we succeeded. */
int arch_spin_trylock(arch_spinlock_t *lock);

/*
 * We cannot take an interrupt after getting a ticket, so don't enable them.
 * The "flags" argument is deliberately unused.
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
  80
  81/*
  82 * Read-write spinlocks, allowing multiple readers
  83 * but only one writer.
  84 *
  85 * We use fetchadd() for readers, and fetchor() with the sign bit
  86 * for writers.
  87 */
  88
  89#define __WRITE_LOCK_BIT (1 << 31)
  90
  91static inline int arch_write_val_locked(int val)
  92{
  93        return val < 0;  /* Optimize "val & __WRITE_LOCK_BIT". */
  94}
  95
  96/**
  97 * read_can_lock - would read_trylock() succeed?
  98 * @lock: the rwlock in question.
  99 */
 100static inline int arch_read_can_lock(arch_rwlock_t *rw)
 101{
 102        return !arch_write_val_locked(rw->lock);
 103}
 104
 105/**
 106 * write_can_lock - would write_trylock() succeed?
 107 * @lock: the rwlock in question.
 108 */
 109static inline int arch_write_can_lock(arch_rwlock_t *rw)
 110{
 111        return rw->lock == 0;
 112}
 113
 114extern void __read_lock_failed(arch_rwlock_t *rw);
 115
 116static inline void arch_read_lock(arch_rwlock_t *rw)
 117{
 118        u32 val = __insn_fetchaddgez4(&rw->lock, 1);
 119        if (unlikely(arch_write_val_locked(val)))
 120                __read_lock_failed(rw);
 121}
 122
 123extern void __write_lock_failed(arch_rwlock_t *rw, u32 val);
 124
 125static inline void arch_write_lock(arch_rwlock_t *rw)
 126{
 127        u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);
 128        if (unlikely(val != 0))
 129                __write_lock_failed(rw, val);
 130}
 131
/* Drop one reader: fence, then decrement the reader count. */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	/*
	 * The fence must precede the decrement so accesses made under
	 * the read lock are ordered before the lock is released.
	 */
	__insn_mf();
	__insn_fetchadd4(&rw->lock, -1);
}
 137
/* Release the write lock by clearing the whole lock word. */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	/* Fence first: critical-section stores precede the release. */
	__insn_mf();
	__insn_exch4(&rw->lock, 0);  /* Avoid waiting in the write buffer. */
}
 143
 144static inline int arch_read_trylock(arch_rwlock_t *rw)
 145{
 146        return !arch_write_val_locked(__insn_fetchaddgez4(&rw->lock, 1));
 147}
 148
/* One-shot write lock: returns 1 on success, 0 if the lock was busy. */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	u32 val = __insn_fetchor4(&rw->lock, __WRITE_LOCK_BIT);

	/* Old value was zero: no readers, no writer -- we own it. */
	if (likely(val == 0))
		return 1;
	/*
	 * If the write bit was already set, another writer owns the lock
	 * and our fetchor was a no-op, so there is nothing to undo.
	 * Otherwise we set the bit while readers were active and must
	 * clear it again before failing.
	 */
	if (!arch_write_val_locked(val))
		__insn_fetchand4(&rw->lock, ~__WRITE_LOCK_BIT);
	return 0;
}
 158
/* As with arch_spin_lock_flags(), the "flags" argument is unused. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ASM_TILE_SPINLOCK_64_H */
 163