linux/arch/arc/include/asm/spinlock.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)
#define arch_spin_lock_flags(lock, flags)       arch_spin_lock(lock)

#ifdef CONFIG_ARC_HAS_LLSC

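/*
 * Illustrative sketch (comment only, not compiled): assuming the lock word
 * only ever holds __ARCH_SPIN_LOCK_LOCKED__ or __ARCH_SPIN_LOCK_UNLOCKED__,
 * the LLOCK/SCOND retry loop in arch_spin_lock() below is roughly equivalent
 * to the following C, expressed with the kernel's cmpxchg() primitive:
 *
 *      while (cmpxchg(&lock->slock,
 *                     __ARCH_SPIN_LOCK_UNLOCKED__,
 *                     __ARCH_SPIN_LOCK_LOCKED__) != __ARCH_SPIN_LOCK_UNLOCKED__)
 *              ;       (keep retrying: lock is held, or SCOND lost a race)
 *
 * The smp_mb() calls around the asm provide the required ordering.
 */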
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;

        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 */

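/*
 * Lock word encoding used by the asm below (inferred from the code and the
 * per-function pseudocode comments; shown here for illustration only):
 *
 *      counter == __ARCH_RW_LOCK_UNLOCKED__    lock is free
 *      counter == 0                            a writer holds it
 *      0 < counter < UNLOCKED                  (UNLOCKED - counter) readers hold it
 *
 * Readers decrement the counter; a writer swings it from UNLOCKED to 0.
 */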
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        smp_mb();

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");

        smp_mb();
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;

        smp_mb();
}

#else   /* !CONFIG_ARC_HAS_LLSC */

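/*
 * Illustrative sketch (comment only, not compiled): without LLSC the lock is
 * taken with the EX (atomic exchange) instruction. Assuming an atomic
 * exchange primitive such as the kernel's xchg(), arch_spin_lock() below is
 * roughly:
 *
 *      while (xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__) ==
 *             __ARCH_SPIN_LOCK_LOCKED__)
 *              ;       (previous owner still holds it: swap again)
 *
 * i.e. keep swapping LOCKED into the lock word until the old value read back
 * was not LOCKED, meaning this CPU is the one that actually took the lock.
 */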
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * This smp_mb() is technically superfluous: only the one after the
         * lock is needed to provide the ACQUIRE semantics. However, doing
         * the "right" thing was regressing hackbench, so this is kept
         * pending further investigation.
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
#ifdef CONFIG_EZNPS_MTM_EXT
        "       .word %3                \n"
#endif
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
        , "i"(CTOP_INST_SCHD_RW)
#endif
        : "memory");

        /*
         * ACQUIRE barrier to ensure loads/stores after taking the lock
         * don't "bleed up" out of the critical section (leaking in is allowed)
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barriers,
         * thus the full all-all barrier is needed here.
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions available on ARCv2, a full
         * barrier is the only option.
         */
        smp_mb();

        /*
         * EX is not really required here; a simple STore of 0 would suffice.
         * However that causes tasklist livelocks in SystemC based SMP virtual
         * platforms, where the SystemC core scheduler uses EX as a cue for
         * moving to the next core. See the git log of this file for details.
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * superfluous, but keeping for now - see pairing version in
         * arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking: writers can be starved indefinitely by readers.
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        smp_mb();
        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#define arch_read_can_lock(x)   ((x)->counter > 0)
#define arch_write_can_lock(x)  ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)

#define arch_read_lock_flags(lock, flags)       arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags)      arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */