linux/arch/tile/lib/spinlock_32.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#include <linux/spinlock.h>
#include <linux/module.h>
#include <asm/processor.h>

#include "spinlock_common.h"

void arch_spin_lock(arch_spinlock_t *lock)
{
        int my_ticket;
        int iterations = 0;
        int delta;

        while ((my_ticket = __insn_tns((void *)&lock->next_ticket)) & 1)
                delay_backoff(iterations++);

        /* Increment the next ticket number, implicitly releasing tns lock. */
        lock->next_ticket = my_ticket + TICKET_QUANTUM;

        /* Wait until it's our turn. */
        while ((delta = my_ticket - lock->current_ticket) != 0)
                relax((128 / CYCLES_PER_RELAX_LOOP) * delta);
}
EXPORT_SYMBOL(arch_spin_lock);
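
/*
 * Illustrative sketch (not part of the original file): the release side
 * of the ticket protocol above.  The real arch_spin_unlock() lives in
 * <asm/spinlock_32.h>; this is only a guess at its shape, assuming a
 * wmb() is needed so stores made under the lock are visible before the
 * next ticket-holder proceeds.
 */
#if 0
static inline void example_spin_unlock(arch_spinlock_t *lock)
{
        int old_ticket = lock->current_ticket;
        wmb();  /* publish critical-section stores before handing off */
        lock->current_ticket = old_ticket + TICKET_QUANTUM;
}
#endif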

int arch_spin_trylock(arch_spinlock_t *lock)
{
        /*
         * Grab a ticket; no need to retry if it's busy, we'll just
         * treat that the same as "locked", since someone else
         * will lock it momentarily anyway.
         */
        int my_ticket = __insn_tns((void *)&lock->next_ticket);

        if (my_ticket == lock->current_ticket) {
                /* Not currently locked, so lock it by keeping this ticket. */
                lock->next_ticket = my_ticket + TICKET_QUANTUM;
                /* Success! */
                return 1;
        }

        if (!(my_ticket & 1)) {
                /* Release next_ticket. */
                lock->next_ticket = my_ticket;
        }

        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock);

void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        u32 iterations = 0;
        while (arch_spin_is_locked(lock))
                delay_backoff(iterations++);
}
EXPORT_SYMBOL(arch_spin_unlock_wait);

/*
 * The low byte is always reserved to be the marker for a "tns" operation
 * since the low bit is set to "1" by a tns.  The next seven bits are
 * zeroes.  The next byte holds the "next" writer value, i.e. the ticket
 * available for the next task that wants to write.  The third byte holds
 * the current writer value, i.e. the writer who holds the current ticket.
 * If current == next == 0, there are no interested writers.
 */
#define WR_NEXT_SHIFT   _WR_NEXT_SHIFT
#define WR_CURR_SHIFT   _WR_CURR_SHIFT
#define WR_WIDTH        _WR_WIDTH
#define WR_MASK         ((1 << WR_WIDTH) - 1)

/*
 * The last eight bits hold the active reader count.  This has to be
 * zero before a writer can start to write.
 */
#define RD_COUNT_SHIFT  _RD_COUNT_SHIFT
#define RD_COUNT_WIDTH  _RD_COUNT_WIDTH
#define RD_COUNT_MASK   ((1 << RD_COUNT_WIDTH) - 1)


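/*
 * Illustrative sketch (not part of the original file): how the single
 * 32-bit rwlock word decomposes into the fields described above.  The
 * helper names are hypothetical; only the shift/mask macros come from
 * this file.
 */
#if 0
static inline u32 example_rd_count(u32 val)     /* active readers */
{
        return (val >> RD_COUNT_SHIFT) & RD_COUNT_MASK;
}
static inline u32 example_wr_next(u32 val)      /* next writer ticket */
{
        return (val >> WR_NEXT_SHIFT) & WR_MASK;
}
static inline u32 example_wr_curr(u32 val)      /* current writer ticket */
{
        return (val >> WR_CURR_SHIFT) & WR_MASK;
}
static inline int example_tns_held(u32 val)     /* low bit set by a tns */
{
        return val & 1;
}
#endif
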
/* Lock the word, spinning until there are no tns-ers. */
static inline u32 get_rwlock(arch_rwlock_t *rwlock)
{
        u32 iterations = 0;
        for (;;) {
                u32 val = __insn_tns((int *)&rwlock->lock);
                if (unlikely(val & 1)) {
                        delay_backoff(iterations++);
                        continue;
                }
                return val;
        }
}

int arch_read_trylock_slow(arch_rwlock_t *rwlock)
{
        u32 val = get_rwlock(rwlock);
        int locked = (val << RD_COUNT_WIDTH) == 0;
        rwlock->lock = val + (locked << RD_COUNT_SHIFT);
        return locked;
}
EXPORT_SYMBOL(arch_read_trylock_slow);
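
/*
 * Illustrative sketch (not part of the original file): how a header-side
 * fast path would be expected to reach this "slow" variant, i.e. only
 * when the initial tns finds the word already tns-locked.  This is an
 * assumption about the split between <asm/spinlock_32.h> and this file,
 * not a copy of the real fast path.
 */
#if 0
static inline int example_read_trylock(arch_rwlock_t *rwlock)
{
        int locked;
        u32 val = __insn_tns((int *)&rwlock->lock);

        if (unlikely(val & 1))          /* contended: take the slow path */
                return arch_read_trylock_slow(rwlock);

        /* uncontended: same logic as the slow path, minus the spin */
        locked = (val << RD_COUNT_WIDTH) == 0;
        rwlock->lock = val + (locked << RD_COUNT_SHIFT);
        return locked;
}
#endif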

void arch_read_unlock_slow(arch_rwlock_t *rwlock)
{
        u32 val = get_rwlock(rwlock);
        rwlock->lock = val - (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_unlock_slow);

void arch_write_unlock_slow(arch_rwlock_t *rwlock, u32 val)
{
        u32 eq, mask = 1 << WR_CURR_SHIFT;
        while (unlikely(val & 1)) {
                /* Limited backoff since we are the highest-priority task. */
                relax(4);
                val = __insn_tns((int *)&rwlock->lock);
        }
        val = __insn_addb(val, mask);
        eq = __insn_seqb(val, val << (WR_CURR_SHIFT - WR_NEXT_SHIFT));
        val = __insn_mz(eq & mask, val);
        rwlock->lock = val;
}
EXPORT_SYMBOL(arch_write_unlock_slow);
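
/*
 * Illustrative sketch (not part of the original file): what the
 * addb/seqb/mz sequence above computes, written as ordinary C.  This
 * assumes __insn_addb adds byte-wise without carry between bytes,
 * __insn_seqb marks bytes that compare equal, and __insn_mz keeps its
 * second operand only when its first is zero.
 */
#if 0
static inline u32 example_write_unlock_word(u32 val)
{
        u32 curr = (val >> WR_CURR_SHIFT) & WR_MASK;
        u32 next = (val >> WR_NEXT_SHIFT) & WR_MASK;

        curr = (curr + 1) & WR_MASK;            /* retire our ticket */
        if (curr == next)
                return 0;                       /* no waiting writers: clear the word */
        return (val & ~((u32)WR_MASK << WR_CURR_SHIFT)) |
                (curr << WR_CURR_SHIFT);        /* hand off to the next writer */
}
#endif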

/*
 * We spin until everything but the reader bits (which are in the high
 * part of the word) is zero, i.e. no active or waiting writers, no tns.
 *
 * ISSUE: This approach can permanently starve readers.  A reader who sees
 * a writer could instead take a ticket lock (just like a writer would),
 * and atomically enter read mode (with 1 reader) when it gets the ticket.
 * This way both readers and writers will always make forward progress
 * in a finite time.
 */
void arch_read_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
        u32 iterations = 0;
        do {
                if (!(val & 1))
                        rwlock->lock = val;
                delay_backoff(iterations++);
                val = __insn_tns((int *)&rwlock->lock);
        } while ((val << RD_COUNT_WIDTH) != 0);
        rwlock->lock = val + (1 << RD_COUNT_SHIFT);
}
EXPORT_SYMBOL(arch_read_lock_slow);

void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
{
        /*
         * The trailing underscore on this variable (and curr_ below)
         * reminds us that the high bits are garbage; we mask them out
         * when we compare them.
         */
        u32 my_ticket_;
        u32 iterations = 0;

        /*
         * Wait until there are no readers, then bump up the next
         * field and capture the ticket value.
         */
        for (;;) {
                if (!(val & 1)) {
                        if ((val >> RD_COUNT_SHIFT) == 0)
                                break;
                        rwlock->lock = val;
                }
                delay_backoff(iterations++);
                val = __insn_tns((int *)&rwlock->lock);
        }

        /* Take out the next ticket and extract my ticket value. */
        rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
        my_ticket_ = val >> WR_NEXT_SHIFT;

        /* Wait until the "current" field matches our ticket. */
        for (;;) {
                u32 curr_ = val >> WR_CURR_SHIFT;
                u32 delta = ((my_ticket_ - curr_) & WR_MASK);
                if (likely(delta == 0))
                        break;

                /* Delay based on how many lock-holders are still out there. */
                relax((256 / CYCLES_PER_RELAX_LOOP) * delta);

                /*
                 * Get a non-tns value to check; we don't need to tns
                 * it ourselves.  Since we're not tns'ing, we retry
                 * more rapidly to get a valid value.
                 */
                while ((val = rwlock->lock) & 1)
                        relax(4);
        }
}
EXPORT_SYMBOL(arch_write_lock_slow);
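
/*
 * Illustrative sketch (not part of the original file): how a header-side
 * fast path would be expected to reach arch_write_lock_slow(), i.e. only
 * when the tns'd word is nonzero (readers, writers, or a concurrent tns).
 * This is an assumption about the header/file split, not the real code;
 * in the uncontended case the word is assumed to be rewritten with one
 * "next" ticket taken and "current" still zero.
 */
#if 0
static inline void example_write_lock(arch_rwlock_t *rwlock)
{
        u32 val = __insn_tns((int *)&rwlock->lock);
        if (unlikely(val != 0)) {
                arch_write_lock_slow(rwlock, val);      /* contended path */
                return;
        }
        rwlock->lock = 1 << WR_NEXT_SHIFT;      /* next = 1, current = 0: write-held */
}
#endif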

int __tns_atomic_acquire(atomic_t *lock)
{
        int ret;
        u32 iterations = 0;

        BUG_ON(__insn_mfspr(SPR_INTERRUPT_CRITICAL_SECTION));
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1);

        while ((ret = __insn_tns((void *)&lock->counter)) == 1)
                delay_backoff(iterations++);
        return ret;
}

void __tns_atomic_release(atomic_t *p, int v)
{
        p->counter = v;
        __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0);
}

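/*
 * Illustrative sketch (not part of the original file): how these two
 * helpers would be expected to bracket an atomic read-modify-write.
 * The caller below is hypothetical; the real users live elsewhere in
 * the tile atomic support code.
 */
#if 0
static inline int example_atomic_add_return(int i, atomic_t *v)
{
        /* tns-locks v->counter and enters the interrupt critical section */
        int val = __tns_atomic_acquire(v);

        /* stores the new value and leaves the critical section */
        __tns_atomic_release(v, val + i);
        return val + i;
}
#endif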