/* linux/arch/arm/include/asm/spinlock.h */
#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

#include <linux/prefetch.h>

/*
 * sev and wfe are ARMv6K extensions.  Uniprocessor ARMv6 may not have the K
 * extensions, so when running on UP, we have to patch these instructions away.
 */
#ifdef CONFIG_THUMB2_KERNEL
/*
 * For Thumb-2, special care is needed to ensure that the conditional WFE
 * instruction really does assemble to exactly 4 bytes (as required by
 * the SMP_ON_UP fixup code).  By itself "wfene" might cause the
 * assembler to insert an extra (16-bit) IT instruction, depending on the
 * presence or absence of neighbouring conditional instructions.
 *
 * To avoid this unpredictability, an appropriate IT is inserted explicitly:
 * the assembler won't change IT instructions which are explicitly present
 * in the input.
 */
#define WFE(cond)       __ALT_SMP_ASM(          \
        "it " cond "\n\t"                       \
        "wfe" cond ".n",                        \
                                                \
        "nop.w"                                 \
)
#else
#define WFE(cond)       __ALT_SMP_ASM("wfe" cond, "nop")
#endif

#define SEV             __ALT_SMP_ASM(WASM(sev), WASM(nop))

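/*
 * Both macros rely on __ALT_SMP_ASM(smp, up): the SMP variant is emitted
 * inline, and the UP replacement is recorded for the SMP_ON_UP boot-time
 * fixup referred to above, which patches the instruction when the kernel
 * finds itself on a uniprocessor CPU.  A usage sketch, in the style of
 * arch_write_lock() below (illustration only):
 *
 *	"	teq	%0, #0\n"
 *	WFE("ne")
 *	"	strexeq	%0, %2, [%1]\n"
 *
 * i.e. wait-for-event while the lock is observed held, with the unlock
 * side issuing SEV (via dsb_sev() below) to wake the waiters.
 */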
static inline void dsb_sev(void)
{
        dsb(ishst);
        __asm__(SEV);
}

/*
 * ARMv6 ticket-based spin-locking.
 *
 * A memory barrier is required after we get a lock, and before we
 * release it, because V6 CPUs are assumed to have weakly ordered
 * memory.
 */

#define arch_spin_unlock_wait(lock) \
        do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)

#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        unsigned long tmp;
        u32 newval;
        arch_spinlock_t lockval;

        prefetchw(&lock->slock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%3]\n"
"       add     %1, %0, %4\n"
"       strex   %2, %1, [%3]\n"
"       teq     %2, #0\n"
"       bne     1b"
        : "=&r" (lockval), "=&r" (newval), "=&r" (tmp)
        : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
        : "cc");

        while (lockval.tickets.next != lockval.tickets.owner) {
                wfe();
                lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
        }

        smp_mb();
}
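
/*
 * A rough C-level sketch of the acquire path above, with a hypothetical
 * fetch_add() standing in for the ldrex/strex loop (illustration only):
 *
 *	old = fetch_add(&lock->slock, 1 << TICKET_SHIFT);
 *	while (old.tickets.next != ACCESS_ONCE(lock->tickets.owner))
 *		wfe();
 *	smp_mb();
 *
 * Adding 1 << TICKET_SHIFT draws a ticket by bumping tickets.next; the
 * ticket we drew (old.tickets.next) is then compared against tickets.owner,
 * sleeping in wfe() and rereading owner each time an unlocker's dsb_sev()
 * wakes the CPU.  The final smp_mb() orders the critical section after the
 * acquire, as the comment above requires for weakly ordered memory.
 */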

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        unsigned long contended, res;
        u32 slock;

        prefetchw(&lock->slock);
        /* The ror #16 compare is zero only when owner == next, i.e. free. */
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%3]\n"
                "       mov     %2, #0\n"
                "       subs    %1, %0, %0, ror #16\n"
                "       addeq   %0, %0, %4\n"
                "       strexeq %2, %0, [%3]"
                : "=&r" (slock), "=&r" (contended), "=&r" (res)
                : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
                : "cc");
        } while (res);

        if (!contended) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        smp_mb();
        lock->tickets.owner++;
        dsb_sev();
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
        return lock.tickets.owner == lock.tickets.next;
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
        return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
        struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
        return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended  arch_spin_is_contended

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
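
/*
 * Taken together with the read-lock code below, the lock word encoding is
 * (a summary for reference, not a definition from this file):
 *
 *	0x00000000	unlocked
 *	0x80000000	write-locked (bit 31 set)
 *	0x0000000N	held by N readers
 *
 * A writer waits for the whole word to drop to zero before setting bit 31;
 * readers increment the word and back off whenever bit 31 is set.
 */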

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        unsigned long tmp;

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%1]\n"
"       teq     %0, #0\n"
        WFE("ne")
"       strexeq %0, %2, [%1]\n"
"       teq     %0, #0\n"
"       bne     1b"
        : "=&r" (tmp)
        : "r" (&rw->lock), "r" (0x80000000)
        : "cc");

        smp_mb();
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        unsigned long contended, res;

        prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
                "       mov     %1, #0\n"
                "       teq     %0, #0\n"
                "       strexeq %1, %3, [%2]"
                : "=&r" (contended), "=&r" (res)
                : "r" (&rw->lock), "r" (0x80000000)
                : "cc");
        } while (res);

        if (!contended) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        smp_mb();

        __asm__ __volatile__(
        "str    %1, [%0]\n"
        :
        : "r" (&rw->lock), "r" (0)
        : "cc");

        dsb_sev();
}

/* write_can_lock - would write_trylock() succeed? */
#define arch_write_can_lock(x)          (ACCESS_ONCE((x)->lock) == 0)

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       adds    %0, %0, #1\n"
"       strexpl %1, %0, [%2]\n"
        WFE("mi")
"       rsbpls  %0, %1, #0\n"
"       bmi     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        smp_mb();
}
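
/*
 * Roughly, in C (illustration only; try_store_exclusive() is a hypothetical
 * stand-in for the strexpl above, and WFE("mi") is patched to a nop on UP):
 *
 *	for (;;) {
 *		old = rw->lock;
 *		if ((int)(old + 1) < 0) {
 *			wfe();
 *			continue;
 *		}
 *		if (try_store_exclusive(&rw->lock, old + 1))
 *			break;
 *	}
 *	smp_mb();
 *
 * A negative result means a writer holds the lock, so the CPU waits in
 * wfe() for the writer's sev and retries.  The asm folds both the
 * writer-held case and a failed strex into the single "bmi 1b" retry via
 * the flags set by adds/rsbpls.
 */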

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned long tmp, tmp2;

        smp_mb();

        prefetchw(&rw->lock);
        __asm__ __volatile__(
"1:     ldrex   %0, [%2]\n"
"       sub     %0, %0, #1\n"
"       strex   %1, %0, [%2]\n"
"       teq     %1, #0\n"
"       bne     1b"
        : "=&r" (tmp), "=&r" (tmp2)
        : "r" (&rw->lock)
        : "cc");

        /* Only the last reader out needs to wake a waiting writer. */
        if (tmp == 0)
                dsb_sev();
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned long contended, res;

        prefetchw(&rw->lock);
        do {
                __asm__ __volatile__(
                "       ldrex   %0, [%2]\n"
                "       mov     %1, #0\n"
                "       adds    %0, %0, #1\n"
                "       strexpl %1, %0, [%2]"
                : "=&r" (contended), "=&r" (res)
                : "r" (&rw->lock)
                : "cc");
        } while (res);

        /* If the lock is negative, then it is already held for write. */
        if (contended < 0x80000000) {
                smp_mb();
                return 1;
        } else {
                return 0;
        }
}

/* read_can_lock - would read_trylock() succeed? */
#define arch_read_can_lock(x)           (ACCESS_ONCE((x)->lock) < 0x80000000)

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SPINLOCK_H */