linux/arch/arc/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

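        /*
         * Illustrative pseudocode for the LL/SC loop below (scond() here is
         * just a stand-in for the conditional store, not a real helper):
         *
         *      do {
         *              val = lock->slock;              // llock
         *      } while (val == LOCKED ||               // spin while held
         *               !scond(&lock->slock, LOCKED)); // retry if scond fails
         *
         * scond only succeeds if nothing wrote the location since the paired
         * llock, so a failed store means another core raced us.
         */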
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        /*
         * ACQUIRE barrier to ensure loads/stores after taking the lock
         * don't "bleed up" out of the critical section (leak-in is allowed).
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barriers,
         * so the full all-all barrier is needed here.
         */
        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

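        /*
         * Same LL/SC sequence as arch_spin_lock() except that an already
         * LOCKED value bails straight to label 4 (got_it stays 0); only a
         * failed scond (a race with another core) loops back to retry.
         */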
        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
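        /*
         * smp_mb() provides the RELEASE ordering: ARC has no lighter-weight
         * release barrier, so prior loads/stores must complete before the
         * plain store below makes the lock visible as free.
         */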
        smp_mb();

        WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

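        /*
         * The counter starts at __ARCH_RW_LOCK_UNLOCKED__ (a positive value)
         * and each reader decrements it; a writer parks it at 0, which is
         * why the brls below spins while the value is <= 0.
         */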
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

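        /*
         * Same as arch_read_lock() above, except a write-locked counter bails
         * straight to label 4 rather than spinning; only an scond failure
         * (collision with another core) retries.
         */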
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer; otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

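        /*
         * Same as arch_write_lock() above, except any value other than
         * UNLOCKED bails straight to label 4; only an scond failure retries.
         */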
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * Per the LKMM, smp_mb() is only required after _lock (and before
         * _unlock) for ACQUIRE and RELEASE semantics respectively. However,
         * EX based spinlocks need the extra smp_mb to work around a hardware
         * quirk.
         */
        smp_mb();

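        /*
         * EX atomically swaps the register with memory: keep writing LOCKED
         * and re-reading the old value; the loop exits once the value read
         * back is not LOCKED, i.e. we observed the lock free and marked it
         * held in the same atomic operation.
         */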
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
        : "memory");

        smp_mb();
}

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

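        /*
         * A single exchange suffices for trylock: swap LOCKED in
         * unconditionally; if the old value read back was UNLOCKED we now
         * own the lock, otherwise it was already held and the store was a
         * harmless LOCKED-over-LOCKED overwrite.
         */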
        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions available on ARCv2, a full
         * barrier is the only option.
         */
        smp_mb();

        /*
         * EX is not really required here, a simple store of 0 suffices.
         * However that causes tasklist livelocks in SystemC based SMP virtual
         * platforms, where the SystemC core scheduler uses EX as a cue for
         * moving to the next core. See the git log of this file for details.
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * see pairing version/comment in arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking as Writers could be starved indefinitely by Reader(s)
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * zero means writer holds the lock exclusively, deny Reader.
         * Otherwise grant lock to first/subseq reader
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (counter < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer; otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers:
         * they can be starved for an indefinite time by readers.
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

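/*
 * The blocking lock variants just spin on the corresponding trylock,
 * calling cpu_relax() between attempts.
 */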
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif

#endif /* __ASM_SPINLOCK_H */