/*
 * arch/arm/include/asm/mutex.h
 *
 * ARM optimized mutex locking primitives
 *
 * See asm-generic/mutex-xchg.h for a formal definition of these interfaces.
 */
#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#if __LINUX_ARM_ARCH__ < 6
/* On pre-ARMv6 hardware the swp-based implementation is the most efficient. */
# include <asm-generic/mutex-xchg.h>
#else

/*
 * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
 * atomic decrement (it is not a reliable atomic decrement, but it
 * satisfies the defined semantics for our purpose while being smaller
 * and faster than a real atomic decrement or atomic swap).  The idea is
 * to attempt decrementing the lock value only once.  If the decremented
 * value isn't zero, or if the store-back fails because the exclusive
 * reservation was lost, we simply bail out through the slow path, where
 * the lock is reattempted until it succeeds.
 */
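
/*
 * Illustrative only: in C terms, with hypothetical ldrex()/strex()
 * helpers standing in for the raw instructions (strex() returning 0
 * when the exclusive store succeeds), the fast path below amounts to:
 *
 *	res = ldrex(&count->counter) - 1;	// load and decrement
 *	ex_flag = strex(res, &count->counter);	// try to store it back
 *	if (res | ex_flag)			// contended, or lost exclusivity
 *		fail_fn(count);
 */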
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"	/* load the counter exclusively */
		"sub	%0, %0, #1	\n\t"	/* decrement it */
		"strex	%1, %0, [%2]	"	/* attempt the store-back */

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
}
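
/*
 * For context (not defined in this file): the generic mutex core is the
 * intended caller, i.e. mutex_lock() in kernel/mutex.c does roughly
 *
 *	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *
 * so fail_fn is the out-of-line slow path that puts the task to sleep.
 */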

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ (

		"ldrex	%0, [%2]	\n\t"	/* load the counter exclusively */
		"sub	%0, %0, #1	\n\t"	/* decrement it */
		"strex	%1, %0, [%2]	"	/* attempt the store-back */

		: "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	__res |= __ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);
	return __res;
}

/*
 * The same trick is used for the unlock fast path.  However, the original
 * value, rather than the result, is used to test for success in order to
 * get better generated assembly.
 */
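
/*
 * Illustrative only: the count is 1 when the mutex is unlocked, 0 when it
 * is locked with no waiters, and negative when there are waiters, so with
 * the same hypothetical ldrex()/strex() helpers the fast unlock below
 * amounts to:
 *
 *	orig = ldrex(&count->counter);
 *	ex_flag = strex(orig + 1, &count->counter);
 *	if (orig | ex_flag)		// waiters present, or lost exclusivity
 *		fail_fn(count);
 */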
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"ldrex	%0, [%3]	\n\t"	/* load the original counter */
		"add	%1, %0, #1	\n\t"	/* increment it */
		"strex	%2, %1, [%3]	"	/* attempt the store-back */

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	__orig |= __ex_flag;
	if (unlikely(__orig != 0))
		fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply
 * fails, then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * For __mutex_fastpath_trylock we use another construct, which could be
 * described as a "single value cmpxchg".
 *
 * This provides the needed trylock semantics as cmpxchg would, but it is
 * lighter and less generic than a true cmpxchg implementation.
 */
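
/*
 * Illustrative only: the loop below behaves like a cmpxchg specialised
 * for the single value 1 (hence "single value cmpxchg"), retrying only
 * when the exclusive reservation is lost:
 *
 *	for (;;) {
 *		orig = ldrex(&count->counter);
 *		if (orig != 1)
 *			return 0;	// already held: fail
 *		if (strex(orig - 1, &count->counter) == 0)
 *			return 1;	// acquired: count went 1 -> 0
 *	}
 */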
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res, __orig;

	__asm__ (

		"1: ldrex	%0, [%3]	\n\t"	/* load the original counter */
		"subs		%1, %0, #1	\n\t"	/* decrement it, setting flags */
		"strexeq	%2, %1, [%3]	\n\t"	/* store back only if it was 1 */
		"movlt		%0, #0		\n\t"	/* it was locked: return 0 */
		"cmpeq		%2, #0		\n\t"	/* did the store succeed? */
		"bgt		1b		"	/* no: lost exclusivity, retry */

		: "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
		: "r" (&count->counter)
		: "cc", "memory");

	return __orig;
}
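
/*
 * Note that fail_fn is never called above: the trylock fast path simply
 * returns 1 on success (the count went from 1 to 0) and 0 on failure.
 * The argument appears to exist only to match the generic fastpath
 * interface.
 */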

#endif	/* __LINUX_ARM_ARCH__ < 6 */
#endif	/* _ASM_MUTEX_H */