/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex.h>
#else

/*
 * SMP fastpath, using the atomic decrement/increment protocol:
 * count == 1 means unlocked, 0 means locked with no waiters, and a
 * negative value means locked with possible waiters.  The explicit
 * smp_mb() calls supply the acquire/release ordering on the
 * uncontended paths; the slowpath (fail_fn) is expected to provide
 * its own ordering.  NOTE(review): barrier placement presumably
 * compensates for this arch's atomic_*_return() not implying a full
 * barrier — confirm against the arch's atomic implementation.
 */

/*
 * __mutex_fastpath_lock - try to take the lock by decrementing the count
 * @count: pointer to the mutex counter
 * @fail_fn: slowpath function to call if the counter went negative
 *
 * If the decrement result is negative the mutex was already held, so
 * punt to @fail_fn; otherwise issue a barrier so critical-section
 * accesses cannot be reordered before the acquisition.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}

/*
 * __mutex_fastpath_lock_retval - same as __mutex_fastpath_lock, but
 * propagate the slowpath's return value
 * @count: pointer to the mutex counter
 * @fail_fn: slowpath function, returning nonzero on failure
 *
 * Returns 0 when the fastpath acquired the lock, otherwise whatever
 * @fail_fn returns (used by the interruptible/killable lock variants).
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		/* acquired uncontended: order subsequent accesses after the dec */
		smp_mb();
		return 0;
	}
}

/*
 * __mutex_fastpath_unlock - release the lock by incrementing the count
 * @count: pointer to the mutex counter
 * @fail_fn: slowpath function to call if waiters may be present
 *
 * The barrier comes BEFORE the increment: release semantics, so
 * critical-section stores are visible before the lock appears free.
 * A post-increment result <= 0 means the count was negative, i.e.
 * there may be waiters to wake, so call @fail_fn.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

/*
 * With the dec-based protocol the unlock slowpath must restore the
 * count itself (the fastpath increment may have left it <= 0).
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * __mutex_fastpath_trylock - try to acquire the mutex without blocking
 * @count: pointer to the mutex counter
 * @fail_fn: spinlock-based trylock fallback
 *
 * Returns 1 if the lock was taken, 0 otherwise.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best one
	 * because it never induce a false contention state. It is included
	 * here because architectures using the inc/dec algorithms over the
	 * xchg ones are much more likely to support cmpxchg natively.
	 *
	 * If not we fall back to the spinlock based variant - that is
	 * just as efficient (and simpler) as a 'destructive' probing of
	 * the mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	/* 1 -> 0 transition: only succeeds when the mutex was unlocked */
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();	/* acquire barrier on success */
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}

#endif

#endif