linux/arch/sh/include/asm/mutex-llsc.h
/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done as on ARMv6+ architectures,
 * with a bastardized atomic decrement (it is not a reliable atomic decrement,
 * but it satisfies the defined semantics for our purpose, while being
 * smaller and faster than a real atomic decrement or atomic swap).
 * The idea is to attempt decrementing the lock value only once. If, once
 * decremented, it isn't zero, or if its store-back fails due to a dispute
 * on the exclusive store, we simply bail out immediately through the slow
 * path, where the lock will be reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		fail_fn(count);
}
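
/*
 * The movli.l/movco.l pair above is SH-4A's load-linked/store-conditional
 * (LL/SC); movt copies the T bit (set by movco.l on a successful store)
 * into __done. As an illustrative C-level sketch only, using hypothetical
 * __ll()/__sc() helpers to stand in for the two instructions:
 *
 *	res  = __ll(&count->counter) - 1;	// linked load, then decrement
 *	done = __sc(&count->counter, res);	// 1 if the store-back succeeded
 *	if (!done || res != 0)			// lost the reservation, or the
 *		fail_fn(count);			// count wasn't 1: take the slowpath
 */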

static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res != 0))
		__res = -1;

	return __res;
}
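
/*
 * Same fastpath as above, but instead of calling a fail function this
 * variant returns 0 when the lock was taken and -1 when the caller is
 * expected to fall back to its own slowpath. Illustrative usage sketch
 * only (take_slowpath() is a hypothetical fallback, not part of this
 * header):
 *
 *	if (__mutex_fastpath_lock_retval(count))
 *		ret = take_slowpath();
 */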

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"
		"add		#1, %0	\n\t"
		"movco.l	%0, @%2	\n\t"
		"movt		%1	\n\t"
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	if (unlikely(!__done || __res <= 0))
		fail_fn(count);
}
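
/*
 * Unlock mirrors the lock fastpath but increments. Illustrative sketch
 * with the same hypothetical __ll()/__sc() helpers as above:
 *
 *	res  = __ll(&count->counter) + 1;	// linked load, then increment
 *	done = __sc(&count->counter, res);	// 1 if the store-back succeeded
 *	if (!done || res <= 0)			// store lost, or the count was
 *		fail_fn(count);			// negative: take the slowpath
 */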

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails,
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1
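
/*
 * Returning 1 here means that, when the fastpath above bails out, the
 * unlock slowpath is still expected to release the lock itself (set the
 * count back to its unlocked value) before waking any waiters.
 */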

/*
 * For __mutex_fastpath_trylock we do an atomic decrement, check the
 * result, and put the outcome (1 on success, 0 otherwise) in the __res
 * variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l	@%2, %0		\n\t"
		"dt		%0		\n\t"
		"movco.l	%0,@%2		\n\t"
		"bf		1b		\n\t"
		"cmp/eq		#0,%0		\n\t"
		"bt		2f		\n\t"
		"mov		#0, %1		\n\t"
		"bf		3f		\n\t"
		"2: mov		#1, %1		\n\t"
		"3:				"
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
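
/*
 * Reading of the trylock sequence above: the movli.l/dt/movco.l loop
 * retries until the decremented value is stored (bf 1b loops on a failed
 * store-conditional), then __res is set to 1 if the decremented value
 * reached zero (lock acquired) and 0 otherwise; fail_fn is not used on
 * this fastpath. An illustrative C-level sketch with the hypothetical
 * __ll()/__sc() helpers:
 *
 *	do {
 *		orig = __ll(&count->counter) - 1;
 *	} while (!__sc(&count->counter, orig));
 *	return (orig == 0) ? 1 : 0;
 */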
#endif /* __ASM_SH_MUTEX_LLSC_H */