linux/arch/tile/include/asm/spinlock_32.h
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * 32-bit SMP spinlocks.
 */

#ifndef _ASM_TILE_SPINLOCK_32_H
#define _ASM_TILE_SPINLOCK_32_H

#include <asm/atomic.h>
#include <asm/page.h>
#include <asm/system.h>
#include <linux/compiler.h>

/*
 * We only use even ticket numbers so the '1' inserted by a tns is
 * an unambiguous "ticket is busy" flag.
 */
#define TICKET_QUANTUM 2

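/*
 * A minimal sketch (illustrative only, not part of this header) of how
 * the out-of-line arch_spin_lock() might use tns together with the
 * even-ticket convention above.  tns ("test and set") atomically
 * writes 1 into a word and returns its old value, so an odd value
 * means another cpu's tns is in flight and we must retry.  The
 * cpu_relax() based spinning here is an assumption for illustration;
 * the real implementation may back off differently.
 */
#if 0
static void example_ticket_lock(arch_spinlock_t *lock)
{
        int my_ticket;

        /* Grab the next_ticket word; an odd value means a tns is in flight. */
        while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
                cpu_relax();

        /* Store back the incremented ticket, releasing the tns hold. */
        lock->next_ticket = my_ticket + TICKET_QUANTUM;

        /* Spin until our (even) ticket is being served. */
        while (lock->current_ticket != my_ticket)
                cpu_relax();
}
#endif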

/*
 * SMP ticket spinlocks, allowing only a single CPU anywhere
 *
 * (the type definitions are in asm/spinlock_types.h)
 */
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        /*
         * Note that even if a new ticket is in the process of being
         * acquired, so lock->next_ticket is 1, it's still reasonable
         * to claim the lock is held, since it will be momentarily
         * if not already.  There's no need to wait for a "valid"
         * lock->next_ticket to become available.
         */
        return lock->next_ticket != lock->current_ticket;
}

void arch_spin_lock(arch_spinlock_t *lock);

/* We cannot take an interrupt after getting a ticket, so don't enable them. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

int arch_spin_trylock(arch_spinlock_t *lock);

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* For efficiency, overlap fetching the old ticket with the wmb(). */
        int old_ticket = lock->current_ticket;
        wmb();  /* guarantee anything modified under the lock is visible */
        lock->current_ticket = old_ticket + TICKET_QUANTUM;
}

void arch_spin_unlock_wait(arch_spinlock_t *lock);

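/*
 * A sketch (illustrative only) of what the out-of-line
 * arch_spin_unlock_wait() has to do: spin until any current holder
 * has released the lock, without ever taking the lock ourselves.
 */
#if 0
static void example_spin_unlock_wait(arch_spinlock_t *lock)
{
        while (arch_spin_is_locked(lock))
                cpu_relax();
}
#endif
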
/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * We use a "tns/store-back" technique on a single word to manage
 * the lock state, looping around to retry if the tns returns 1.
 */

/* Internal layout of the word; do not use. */
#define _WR_NEXT_SHIFT  8
#define _WR_CURR_SHIFT  16
#define _WR_WIDTH       8
#define _RD_COUNT_SHIFT 24
#define _RD_COUNT_WIDTH 8

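/*
 * For illustration only: how the single rwlock word breaks down under
 * the shifts above.  Bit 0 is the transient "tns in progress" flag,
 * bits 8-15 and 16-23 are the writers' next/current tickets, and
 * bits 24-31 count the active readers.  These helpers are a sketch,
 * not part of the kernel API.
 */
#if 0
static inline u32 example_rd_count(u32 val)     /* active readers */
{
        return val >> _RD_COUNT_SHIFT;
}
static inline u32 example_wr_next(u32 val)      /* next writer ticket */
{
        return (val >> _WR_NEXT_SHIFT) & ((1 << _WR_WIDTH) - 1);
}
static inline u32 example_wr_curr(u32 val)      /* current writer ticket */
{
        return (val >> _WR_CURR_SHIFT) & ((1 << _WR_WIDTH) - 1);
}
#endif
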
/* Internal functions; do not use. */
void arch_read_lock_slow(arch_rwlock_t *, u32);
int arch_read_trylock_slow(arch_rwlock_t *);
void arch_read_unlock_slow(arch_rwlock_t *);
void arch_write_lock_slow(arch_rwlock_t *, u32);
void arch_write_unlock_slow(arch_rwlock_t *, u32);

/**
 * arch_read_can_lock() - would read_trylock() succeed?
 */
static inline int arch_read_can_lock(arch_rwlock_t *rwlock)
{
        /*
         * Shifting out the reader-count byte leaves only the tns flag
         * and the writer ticket fields; readers can take the lock iff
         * all of those are zero.
         */
        return (rwlock->lock << _RD_COUNT_WIDTH) == 0;
}

/**
 * arch_write_can_lock() - would write_trylock() succeed?
 */
static inline int arch_write_can_lock(arch_rwlock_t *rwlock)
{
        return rwlock->lock == 0;
}

/**
 * arch_read_lock() - acquire a read lock.
 */
static inline void arch_read_lock(arch_rwlock_t *rwlock)
{
        /* tns atomically sets the word to 1 and returns the old value. */
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val << _RD_COUNT_WIDTH)) {
                /* A tns was in flight, or a writer is waiting or active. */
                arch_read_lock_slow(rwlock, val);
                return;
        }
        /* Store back the old value with the reader count incremented. */
        rwlock->lock = val + (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_lock() - acquire a write lock.
 */
static inline void arch_write_lock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val != 0)) {
                /* Readers, a writer, or an in-flight tns: take the slow path. */
                arch_write_lock_slow(rwlock, val);
                return;
        }
        /* The word was idle; mark it write-locked via the "next" field. */
        rwlock->lock = 1 << _WR_NEXT_SHIFT;
}

/**
 * arch_read_trylock() - try to acquire a read lock.
 */
static inline int arch_read_trylock(arch_rwlock_t *rwlock)
{
        int locked;
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val & 1))
                return arch_read_trylock_slow(rwlock);
        /*
         * Branchlessly bump the reader count only if no writer is
         * waiting or active; either way, store back to end the tns.
         */
        locked = (val << _RD_COUNT_WIDTH) == 0;
        rwlock->lock = val + (locked << _RD_COUNT_SHIFT);
        return locked;
}

/**
 * arch_write_trylock() - try to acquire a write lock.
 */
static inline int arch_write_trylock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);

        /*
         * If a tns is in progress, or there's a waiting or active locker,
         * or active readers, we can't take the lock, so give up.
         */
        if (unlikely(val != 0)) {
                /* If our tns actually claimed the word, restore the old value. */
                if (!(val & 1))
                        rwlock->lock = val;
                return 0;
        }

        /* Set the "next" field to mark it locked. */
        rwlock->lock = 1 << _WR_NEXT_SHIFT;
        return 1;
}

/**
 * arch_read_unlock() - release a read lock.
 */
static inline void arch_read_unlock(arch_rwlock_t *rwlock)
{
        u32 val;
        mb();  /* guarantee anything modified under the lock is visible */
        val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val & 1)) {
                arch_read_unlock_slow(rwlock);
                return;
        }
        rwlock->lock = val - (1 << _RD_COUNT_SHIFT);
}

/**
 * arch_write_unlock() - release a write lock.
 */
static inline void arch_write_unlock(arch_rwlock_t *rwlock)
{
        u32 val;
        mb();  /* guarantee anything modified under the lock is visible */
        val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val != (1 << _WR_NEXT_SHIFT))) {
                arch_write_unlock_slow(rwlock, val);
                return;
        }
        rwlock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

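/*
 * Usage sketch (illustrative, not part of this header): kernel code
 * never calls these arch_* hooks directly; it goes through the generic
 * spinlock/rwlock wrappers, roughly as below.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static DEFINE_RWLOCK(example_rwlock);

static void example_usage(void)
{
        spin_lock(&example_lock);       /* ends up in arch_spin_lock() */
        spin_unlock(&example_lock);

        read_lock(&example_rwlock);     /* fast path: arch_read_lock() */
        read_unlock(&example_rwlock);

        write_lock(&example_rwlock);    /* fast path: arch_write_lock() */
        write_unlock(&example_rwlock);
}
#endif
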
#endif /* _ASM_TILE_SPINLOCK_32_H */