linux/arch/arc/include/asm/spinlock.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#include <asm/spinlock_types.h>
#include <asm/processor.h>
#include <asm/barrier.h>

#define arch_spin_is_locked(x)  ((x)->slock != __ARCH_SPIN_LOCK_UNLOCKED__)

#ifdef CONFIG_ARC_HAS_LLSC

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 1b   \n"     /* spin while LOCKED */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        /*
         * ACQUIRE barrier to ensure loads/stores after taking the lock
         * don't "bleed up" out of the critical section (leak-in is allowed):
         * http://www.spinics.net/lists/kernel/msg2010409.html
         *
         * ARCv2 only has load-load, store-store and all-all barriers,
         * thus we need the full all-all barrier here.
         */
        smp_mb();
}
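
/*
 * Illustrative sketch only, kept under #if 0: the same acquire algorithm as
 * the LLOCK/SCOND asm above, written with generic helpers. It assumes a
 * cmpxchg() implementation is visible at this point (this header does not
 * include one); the authoritative implementation is the asm above.
 */
#if 0
static inline void arch_spin_lock_c_model(arch_spinlock_t *lock)
{
        unsigned int old;

        do {
                /* spin while LOCKED, like the "breq ... 1b" above */
                while (READ_ONCE(lock->slock) == __ARCH_SPIN_LOCK_LOCKED__)
                        cpu_relax();

                old = __ARCH_SPIN_LOCK_UNLOCKED__;

                /* the "scond" step: store LOCKED only if nobody raced in */
        } while (cmpxchg(&lock->slock, old, __ARCH_SPIN_LOCK_LOCKED__) != old);

        smp_mb();       /* ACQUIRE barrier, as in the real lock */
}
#endif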

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val, got_it = 0;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[slock]]      \n"
        "       breq    %[val], %[LOCKED], 4f   \n"     /* already LOCKED, just bail */
        "       scond   %[LOCKED], [%[slock]]   \n"     /* acquire */
        "       bnz     1b                      \n"
        "       mov     %[got_it], 1            \n"
        "4:                                     \n"
        "                                       \n"
        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [slock]       "r"     (&(lock->slock)),
          [LOCKED]      "r"     (__ARCH_SPIN_LOCK_LOCKED__)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();

        WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
}
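
/*
 * Illustrative usage only, kept under #if 0: a hypothetical caller using the
 * primitives above directly (real code goes through the generic spinlock
 * wrappers). It shows the trylock return convention (1 == lock taken) and
 * mirrors the trylock + cpu_relax() pattern this file itself uses for the
 * !LLSC rwlocks further down.
 */
#if 0
static inline void example_critical_section(arch_spinlock_t *lock)
{
        /* keep trying until arch_spin_trylock() reports success */
        while (!arch_spin_trylock(lock))
                cpu_relax();

        /* ... critical section ... */

        arch_spin_unlock(lock);
}
#endif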

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by reader(s).
 */

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * Zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader:
         *
         *      if (rw->counter > 0) {
         *              rw->counter--;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 1b\n"     /* <= 0: spin while write locked */
        "       sub     %[val], %[val], 1       \n"     /* reader lock */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}
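
/*
 * Illustrative sketch only, kept under #if 0: the reader-acquire loop above
 * in plain C, including the retry when SCOND loses a race. As with the
 * spinlock sketch earlier, it assumes cmpxchg() is visible here; the
 * authoritative implementation is the asm above.
 */
#if 0
static inline void arch_read_lock_c_model(arch_rwlock_t *rw)
{
        unsigned int val;

        do {
                /* counter == 0 means a writer holds the lock: spin */
                while ((val = READ_ONCE(rw->counter)) == 0)
                        cpu_relax();

                /* counter--: take a reader slot; retry if we lost a race */
        } while (cmpxchg(&rw->counter, val, val - 1) != val);

        smp_mb();       /* ACQUIRE */
}
#endif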

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brls    %[val], %[WR_LOCKED], 4f\n"     /* <= 0: already write locked, bail */
        "       sub     %[val], %[val], 1       \n"     /* counter-- */
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int val;

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers
         * (they can be starved for an indefinite time by readers):
         *
         *      if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
         *              rw->counter = 0;
         *              ret = 1;
         *      }
         */

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 1b \n"     /* while !UNLOCKED spin */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();
}
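
/*
 * Illustrative sketch only, kept under #if 0: the writer-acquire loop above
 * in plain C. A writer may proceed only when the counter is at its fully
 * unlocked value, and claims the lock by writing 0. Again assumes cmpxchg()
 * is visible here; the authoritative implementation is the asm above.
 */
#if 0
static inline void arch_write_lock_c_model(arch_rwlock_t *rw)
{
        do {
                /* spin until neither readers nor a writer hold the lock */
                while (READ_ONCE(rw->counter) != __ARCH_RW_LOCK_UNLOCKED__)
                        cpu_relax();

                /* claim exclusively (counter = 0); retry on a lost race */
        } while (cmpxchg(&rw->counter, __ARCH_RW_LOCK_UNLOCKED__, 0) !=
                 __ARCH_RW_LOCK_UNLOCKED__);

        smp_mb();       /* ACQUIRE */
}
#endif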

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int val, got_it = 0;

        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       brne    %[val], %[UNLOCKED], 4f \n"     /* !UNLOCKED, bail */
        "       mov     %[val], %[WR_LOCKED]    \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"     /* retry if collided with someone */
        "       mov     %[got_it], 1            \n"
        "                                       \n"
        "4: ; --- done ---                      \n"

        : [val]         "=&r"   (val),
          [got_it]      "+&r"   (got_it)
        : [rwlock]      "r"     (&(rw->counter)),
          [UNLOCKED]    "ir"    (__ARCH_RW_LOCK_UNLOCKED__),
          [WR_LOCKED]   "ir"    (0)
        : "memory", "cc");

        smp_mb();

        return got_it;
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int val;

        smp_mb();

        /*
         * rw->counter++;
         */
        __asm__ __volatile__(
        "1:     llock   %[val], [%[rwlock]]     \n"
        "       add     %[val], %[val], 1       \n"
        "       scond   %[val], [%[rwlock]]     \n"
        "       bnz     1b                      \n"
        "                                       \n"
        : [val]         "=&r"   (val)
        : [rwlock]      "r"     (&(rw->counter))
        : "memory", "cc");
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
}
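
/*
 * Illustrative usage only, kept under #if 0: several CPUs may hold the read
 * side concurrently, while the write side is exclusive. The names below are
 * hypothetical; real code uses the generic read_lock()/write_lock() API
 * rather than these arch_* primitives.
 */
#if 0
static int example_shared_state;

static inline int example_reader(arch_rwlock_t *rw)
{
        int snapshot;

        arch_read_lock(rw);     /* may run alongside other readers */
        snapshot = example_shared_state;
        arch_read_unlock(rw);

        return snapshot;
}

static inline void example_writer(arch_rwlock_t *rw, int new_val)
{
        arch_write_lock(rw);    /* excludes readers and other writers */
        example_shared_state = new_val;
        arch_write_unlock(rw);
}
#endif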

#else   /* !CONFIG_ARC_HAS_LLSC */

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        /*
         * Per the LKMM, smp_mb() is only required after _lock (and before
         * _unlock) for ACQ and REL semantics respectively. However, EX based
         * spinlocks need the extra smp_mb() to work around a hardware quirk.
         */
        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
#ifdef CONFIG_EZNPS_MTM_EXT
        "       .word %3                \n"
#endif
        "       breq  %0, %2, 1b        \n"
        : "+&r" (val)
        : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
#ifdef CONFIG_EZNPS_MTM_EXT
        , "i"(CTOP_INST_SCHD_RW)
#endif
        : "memory");

        smp_mb();
}
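
/*
 * Illustrative sketch only, kept under #if 0: EX atomically swaps a register
 * with memory, modelled here with xchg() (assumed visible at this point).
 * Keep swapping LOCKED in until the value swapped out is not LOCKED, i.e.
 * until we are the one who changed it from UNLOCKED to LOCKED.
 */
#if 0
static inline void arch_spin_lock_ex_model(arch_spinlock_t *lock)
{
        smp_mb();       /* hardware-quirk barrier, see comment above */

        while (xchg(&lock->slock, __ARCH_SPIN_LOCK_LOCKED__) ==
               __ARCH_SPIN_LOCK_LOCKED__)
                cpu_relax();

        smp_mb();       /* ACQUIRE */
}
#endif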

/* 1 - lock taken successfully */
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

        smp_mb();

        __asm__ __volatile__(
        "1:     ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        smp_mb();

        return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;

        /*
         * RELEASE barrier: given the instructions available on ARCv2, a full
         * barrier is the only option.
         */
        smp_mb();

        /*
         * EX is not really required here, a simple STore of 0 suffices.
         * However this causes tasklist livelocks in SystemC based SMP virtual
         * platforms where the systemc core scheduler uses EX as a cue for
         * moving to next core. Do a git log of this file for details
         */
        __asm__ __volatile__(
        "       ex  %0, [%1]            \n"
        : "+r" (val)
        : "r"(&(lock->slock))
        : "memory");

        /*
         * see pairing version/comment in arch_spin_lock above
         */
        smp_mb();
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 * Unfair locking, as writers can be starved indefinitely by reader(s).
 *
 * The spinlock itself is contained in @counter and access to it is
 * serialized with @lock_mutex.
 */

/* 1 - lock taken successfully */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * Zero means a writer holds the lock exclusively: deny the reader.
         * Otherwise grant the lock to the first/subsequent reader.
         */
        if (rw->counter > 0) {
                rw->counter--;
                ret = 1;
        }

        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

/* 1 - lock taken successfully */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        int ret = 0;
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));

        /*
         * If reader(s) hold the lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
         * deny the writer. Otherwise, if unlocked, grant it to the writer.
         * Hence the claim that Linux rwlocks are unfair to writers
         * (they can be starved for an indefinite time by readers).
         */
        if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
                rw->counter = 0;
                ret = 1;
        }
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);

        return ret;
}

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        while (!arch_read_trylock(rw))
                cpu_relax();
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (!arch_write_trylock(rw))
                cpu_relax();
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter++;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&(rw->lock_mutex));
        rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
        arch_spin_unlock(&(rw->lock_mutex));
        local_irq_restore(flags);
}

#endif  /* CONFIG_ARC_HAS_LLSC */

#endif /* __ASM_SPINLOCK_H */