/* linux/arch/mips/include/asm/spinlock.h */
   1/*
   2 * This file is subject to the terms and conditions of the GNU General Public
   3 * License.  See the file "COPYING" in the main directory of this archive
   4 * for more details.
   5 *
   6 * Copyright (C) 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
   7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
   8 */
   9#ifndef _ASM_SPINLOCK_H
  10#define _ASM_SPINLOCK_H
  11
  12#include <linux/compiler.h>
  13
  14#include <asm/barrier.h>
  15#include <asm/war.h>
  16
  17/*
  18 * Your basic SMP spinlocks, allowing only a single CPU anywhere
  19 *
  20 * Simple spin lock operations.  There are two variants, one clears IRQ's
  21 * on the local processor, one does not.
  22 *
  23 * These are fair FIFO ticket locks
  24 *
  25 * (the type definitions are in asm/spinlock_types.h)
  26 */
  27
  28
  29/*
  30 * Ticket locks are conceptually two parts, one indicating the current head of
  31 * the queue, and the other indicating the current tail. The lock is acquired
  32 * by atomically noting the tail and incrementing it by one (thus adding
  33 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
  35 */
  36
  37static inline int arch_spin_is_locked(arch_spinlock_t *lock)
  38{
  39        u32 counters = ACCESS_ONCE(lock->lock);
  40
  41        return ((counters >> 16) ^ counters) & 0xffff;
  42}
  43
/* The saved IRQ flags are not used by the ticket lock; just take it. */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
/* Busy-wait (without acquiring) until the lock is observed unlocked. */
#define arch_spin_unlock_wait(x) \
	while (arch_spin_is_locked(x)) { cpu_relax(); }
  47
  48static inline int arch_spin_is_contended(arch_spinlock_t *lock)
  49{
  50        u32 counters = ACCESS_ONCE(lock->lock);
  51
  52        return (((counters >> 16) - counters) & 0xffff) > 1;
  53}
  54#define arch_spin_is_contended  arch_spin_is_contended
  55
  56static inline void arch_spin_lock(arch_spinlock_t *lock)
  57{
  58        int my_ticket;
  59        int tmp;
  60        int inc = 0x10000;
  61
  62        if (R10000_LLSC_WAR) {
  63                __asm__ __volatile__ (
  64                "       .set push               # arch_spin_lock        \n"
  65                "       .set noreorder                                  \n"
  66                "                                                       \n"
  67                "1:     ll      %[ticket], %[ticket_ptr]                \n"
  68                "       addu    %[my_ticket], %[ticket], %[inc]         \n"
  69                "       sc      %[my_ticket], %[ticket_ptr]             \n"
  70                "       beqzl   %[my_ticket], 1b                        \n"
  71                "        nop                                            \n"
  72                "       srl     %[my_ticket], %[ticket], 16             \n"
  73                "       andi    %[ticket], %[ticket], 0xffff            \n"
  74                "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
  75                "       bne     %[ticket], %[my_ticket], 4f             \n"
  76                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
  77                "2:                                                     \n"
  78                "       .subsection 2                                   \n"
  79                "4:     andi    %[ticket], %[ticket], 0xffff            \n"
  80                "       sll     %[ticket], 5                            \n"
  81                "                                                       \n"
  82                "6:     bnez    %[ticket], 6b                           \n"
  83                "        subu   %[ticket], 1                            \n"
  84                "                                                       \n"
  85                "       lhu     %[ticket], %[serving_now_ptr]           \n"
  86                "       beq     %[ticket], %[my_ticket], 2b             \n"
  87                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
  88                "       b       4b                                      \n"
  89                "        subu   %[ticket], %[ticket], 1                 \n"
  90                "       .previous                                       \n"
  91                "       .set pop                                        \n"
  92                : [ticket_ptr] "+m" (lock->lock),
  93                  [serving_now_ptr] "+m" (lock->h.serving_now),
  94                  [ticket] "=&r" (tmp),
  95                  [my_ticket] "=&r" (my_ticket)
  96                : [inc] "r" (inc));
  97        } else {
  98                __asm__ __volatile__ (
  99                "       .set push               # arch_spin_lock        \n"
 100                "       .set noreorder                                  \n"
 101                "                                                       \n"
 102                "1:     ll      %[ticket], %[ticket_ptr]                \n"
 103                "       addu    %[my_ticket], %[ticket], %[inc]         \n"
 104                "       sc      %[my_ticket], %[ticket_ptr]             \n"
 105                "       beqz    %[my_ticket], 1b                        \n"
 106                "        srl    %[my_ticket], %[ticket], 16             \n"
 107                "       andi    %[ticket], %[ticket], 0xffff            \n"
 108                "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
 109                "       bne     %[ticket], %[my_ticket], 4f             \n"
 110                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
 111                "2:                                                     \n"
 112                "       .subsection 2                                   \n"
 113                "4:     andi    %[ticket], %[ticket], 0x1fff            \n"
 114                "       sll     %[ticket], 5                            \n"
 115                "                                                       \n"
 116                "6:     bnez    %[ticket], 6b                           \n"
 117                "        subu   %[ticket], 1                            \n"
 118                "                                                       \n"
 119                "       lhu     %[ticket], %[serving_now_ptr]           \n"
 120                "       beq     %[ticket], %[my_ticket], 2b             \n"
 121                "        subu   %[ticket], %[my_ticket], %[ticket]      \n"
 122                "       b       4b                                      \n"
 123                "        subu   %[ticket], %[ticket], 1                 \n"
 124                "       .previous                                       \n"
 125                "       .set pop                                        \n"
 126                : [ticket_ptr] "+m" (lock->lock),
 127                  [serving_now_ptr] "+m" (lock->h.serving_now),
 128                  [ticket] "=&r" (tmp),
 129                  [my_ticket] "=&r" (my_ticket)
 130                : [inc] "r" (inc));
 131        }
 132
 133        smp_llsc_mb();
 134}
 135
/*
 * Release the ticket lock: advance serving_now by one so the waiter
 * holding the next ticket (spinning in arch_spin_lock) may proceed.
 */
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        /* Computed as int; truncated back to 16 bits by the cast below. */
        unsigned int serving_now = lock->h.serving_now + 1;
        /* Release ordering: critical-section stores before the unlock store. */
        wmb();
        lock->h.serving_now = (u16)serving_now;
        /* Presumably pushes the store towards memory promptly so spinning
           waiters observe it sooner — see asm/barrier.h (NOTE: verify). */
        nudge_writes();
}
 143
/*
 * Try to acquire the ticket lock without queueing.
 *
 * Returns 1 if the lock was taken (our prospective ticket equalled
 * now_serving and the ll/sc increment of the ticket half succeeded),
 * 0 if someone else holds a ticket ahead of us.
 */
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
        int tmp, tmp2, tmp3;
        int inc = 0x10000;      /* +1 in the ticket (high 16-bit) half */

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__ (
                "       .set push               # arch_spin_trylock     \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
                "       sc      %[ticket], %[ticket_ptr]                \n"
                "       beqzl   %[ticket], 1b                           \n"
                "        li     %[ticket], 1                            \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       2b                                      \n"
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+m" (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
                : [inc] "r" (inc));
        } else {
                /* Same sequence with a plain beqz retry on sc failure. */
                __asm__ __volatile__ (
                "       .set push               # arch_spin_trylock     \n"
                "       .set noreorder                                  \n"
                "                                                       \n"
                "1:     ll      %[ticket], %[ticket_ptr]                \n"
                "       srl     %[my_ticket], %[ticket], 16             \n"
                "       andi    %[my_ticket], %[my_ticket], 0xffff      \n"
                "       andi    %[now_serving], %[ticket], 0xffff       \n"
                "       bne     %[my_ticket], %[now_serving], 3f        \n"
                "        addu   %[ticket], %[ticket], %[inc]            \n"
                "       sc      %[ticket], %[ticket_ptr]                \n"
                "       beqz    %[ticket], 1b                           \n"
                "        li     %[ticket], 1                            \n"
                "2:                                                     \n"
                "       .subsection 2                                   \n"
                "3:     b       2b                                      \n"
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
                : [ticket_ptr] "+m" (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
                : [inc] "r" (inc));
        }

        /* Acquire barrier after a successful ll/sc (harmless on failure). */
        smp_llsc_mb();

        return tmp;
}
 205
 206/*
 207 * Read-write spinlocks, allowing multiple readers but only one writer.
 208 *
 209 * NOTE! it is quite common to have readers in interrupts but no interrupt
 210 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 211 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 212 * read-locks.
 213 */
 214
/*
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A non-negative lock word means no writer holds it (a writer sets
 * bit 31 — see arch_write_lock), so a read lock could be taken.
 */
#define arch_read_can_lock(rw)  ((rw)->lock >= 0)

/*
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 *
 * A writer needs the whole word to be zero: no readers (count in the
 * low bits) and no writer (bit 31).
 */
#define arch_write_can_lock(rw) (!(rw)->lock)
 226
/*
 * Acquire @rw for reading: atomically increment the reader count via
 * ll/sc, spinning while the lock word is negative (a writer holds it —
 * arch_write_lock sets bit 31 via "lui 0x8000").
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_lock        \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 1b                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        } else {
                /* Contended wait loop kept out of line in .subsection 2. */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_lock        \n"
                "1:     ll      %1, %2                                  \n"
                "       bltz    %1, 3f                                  \n"
                "        addu   %1, 1                                   \n"
                "2:     sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
                "        nop                                            \n"
                "       .subsection 2                                   \n"
                "3:     ll      %1, %2                                  \n"
                "       bltz    %1, 3b                                  \n"
                "        addu   %1, 1                                   \n"
                "       b       2b                                      \n"
                "        nop                                            \n"
                "       .previous                                       \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        }

        /* Acquire barrier: the critical section must not float above us. */
        smp_llsc_mb();
}
 268
/* Note the use of sub, not subu which will make the kernel die with an
   overflow exception if we ever try to unlock an rwlock that is already
   unlocked or is being held by a writer.  */
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        /* Release ordering: finish the critical section before the ll/sc. */
        smp_mb__before_llsc();

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        } else {
                /* sc-failure retry kept out of line in .subsection 2. */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_unlock      \n"
                "1:     ll      %1, %2                                  \n"
                "       sub     %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 2f                                  \n"
                "        nop                                            \n"
                "       .subsection 2                                   \n"
                "2:     b       1b                                      \n"
                "        nop                                            \n"
                "       .previous                                       \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        }
}
 305
/*
 * Acquire @rw for writing: spin until the lock word is zero (no
 * readers, no writer), then atomically set the writer bit
 * (lui 0x8000 -> 0x80000000) via ll/sc.
 */
static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned int tmp;

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 1b                                  \n"
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        } else {
                /* Contended wait loop kept out of line in .subsection 2. */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_lock       \n"
                "1:     ll      %1, %2                                  \n"
                "       bnez    %1, 3f                                  \n"
                "        lui    %1, 0x8000                              \n"
                "2:     sc      %1, %0                                  \n"
                "       beqz    %1, 3f                                  \n"
                "        nop                                            \n"
                "       .subsection 2                                   \n"
                "3:     ll      %1, %2                                  \n"
                "       bnez    %1, 3b                                  \n"
                "        lui    %1, 0x8000                              \n"
                "       b       2b                                      \n"
                "        nop                                            \n"
                "       .previous                                       \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp)
                : "m" (rw->lock)
                : "memory");
        }

        /* Acquire barrier: the critical section must not float above us. */
        smp_llsc_mb();
}
 347
/*
 * Release a write lock: a plain word store of zero ($0) is sufficient
 * since the writer owns the word exclusively.
 */
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        /* Full barrier: order the critical section before the release store. */
        smp_mb();

        __asm__ __volatile__(
        "                               # arch_write_unlock     \n"
        "       sw      $0, %0                                  \n"
        : "=m" (rw->lock)
        : "m" (rw->lock)
        : "memory");
}
 359
/*
 * Try to take @rw for reading.  Returns 1 on success (reader count
 * incremented), 0 if a writer holds the lock (negative lock word).
 */
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;
        int ret;

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       .set    reorder                                 \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
                : "m" (rw->lock)
                : "memory");
        } else {
                /* Same sequence with a plain beqz retry on sc failure. */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_read_trylock     \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bltz    %1, 2f                                  \n"
                "        addu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
                : "m" (rw->lock)
                : "memory");
        }

        return ret;
}
 403
/*
 * Try to take @rw for writing.  Returns 1 on success (lock word went
 * from 0 to the writer bit, 0x80000000), 0 if any reader or writer
 * currently holds it.
 */
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned int tmp;
        int ret;

        if (R10000_LLSC_WAR) {
                /* beqzl (branch-likely) retry for the R10000 ll/sc erratum */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_trylock    \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
                "        lui    %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
                : "m" (rw->lock)
                : "memory");
        } else {
                /* sc-failure retry kept out of line in .subsection 2. */
                __asm__ __volatile__(
                "       .set    noreorder       # arch_write_trylock    \n"
                "       li      %2, 0                                   \n"
                "1:     ll      %1, %3                                  \n"
                "       bnez    %1, 2f                                  \n"
                "       lui     %1, 0x8000                              \n"
                "       sc      %1, %0                                  \n"
                "       beqz    %1, 3f                                  \n"
                "        li     %2, 1                                   \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "        li     %2, 0                                   \n"
                "       .previous                                       \n"
                "       .set    reorder                                 \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
                : "m" (rw->lock)
                : "memory");
        }

        return ret;
}
 450
/* The saved IRQ flags are not used by these locks; the *_flags
   variants simply discard them. */
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

/* Relax hints used by generic lock-contention loops while waiting. */
#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* _ASM_SPINLOCK_H */
 459