/*
 * Assembly implementation of the mutex fastpath, based on atomic
 * decrement/increment.
 *
 * started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Ingo Molnar <mingo@redhat.com>
 */
#ifndef _ASM_X86_MUTEX_64_H
#define _ASM_X86_MUTEX_64_H
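
/*
 * The fastpaths below rely on the mutex count convention used by the
 * generic mutex code (see <linux/mutex.h>):
 *
 *	1: unlocked
 *	0: locked, no waiters
 *	negative: locked, possible waiters
 */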

/**
 * __mutex_fastpath_lock - decrement and call function if negative
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is negative
 *
 * Atomically decrements @v and calls <fail_fn> if the result is negative.
 */
#define __mutex_fastpath_lock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   decl (%%rdi)\n"		\
		     "   jns 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
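
/*
 * Rough C equivalent of the fastpath above (sketch):
 *
 *	if (unlikely(atomic_dec_return(v) < 0))
 *		fail_fn(v);
 *
 * The asm version keeps the common case to a single locked decl plus a
 * not-taken branch, and reaches fail_fn with the lock address already
 * in %rdi, the first-argument register of the x86-64 calling
 * convention; since fail_fn is a full function call, all other
 * call-clobbered registers must appear in the clobber list.
 */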

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 *
 * Change the count from 1 to a value lower than 1. This function returns 0
 * if the fastpath succeeds, or -1 otherwise.
 */
static inline int __mutex_fastpath_lock_retval(atomic_t *count)
{
	if (unlikely(atomic_dec_return(count) < 0))
		return -1;
	else
		return 0;
}
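
/*
 * Sketch of a typical caller, after the pattern in kernel/mutex.c
 * (the slowpath name is shown for illustration):
 *
 *	ret = __mutex_fastpath_lock_retval(&lock->count);
 *	if (unlikely(ret))
 *		ret = __mutex_lock_interruptible_slowpath(lock);
 *
 * i.e. the caller, not this helper, picks which slowpath to enter.
 */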

/**
 * __mutex_fastpath_unlock - increment and call function if nonpositive
 * @v: pointer of type atomic_t
 * @fail_fn: function to call if the result is nonpositive
 *
 * Atomically increments @v and calls <fail_fn> if the result is nonpositive.
 */
#define __mutex_fastpath_unlock(v, fail_fn)			\
do {								\
	unsigned long dummy;					\
								\
	typecheck(atomic_t *, v);				\
	typecheck_fn(void (*)(atomic_t *), fail_fn);		\
								\
	asm volatile(LOCK_PREFIX "   incl (%%rdi)\n"		\
		     "   jg 1f\n"				\
		     "   call " #fail_fn "\n"			\
		     "1:"					\
		     : "=D" (dummy)				\
		     : "D" (v)					\
		     : "rax", "rsi", "rdx", "rcx",		\
		       "r8", "r9", "r10", "r11", "memory");	\
} while (0)
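
/*
 * Note the asymmetry with the lock fastpath: "jg" skips fail_fn only
 * when the post-increment count is strictly positive. A result of 0 or
 * less means the count was negative before the incl, i.e. there may be
 * waiters to wake up. Rough C equivalent (sketch):
 *
 *	if (unlikely(atomic_inc_return(v) <= 0))
 *		fail_fn(v);
 */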

#define __mutex_slowpath_needs_to_unlock()	1
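
/*
 * The 1 above tells the generic mutex code that a failed unlock
 * fastpath leaves the mutex still reading as locked (the count is 0 or
 * negative after the incl), so the unlock slowpath has to set the
 * count back to 1 itself.
 */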

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0 and return 1 (success), or return 0 (failure)
 * if it wasn't 1 originally. [This function should only be called from
 * atomic context and on some architectures should call the slow path if the
 * fastpath can't be acquired.]
 */
static inline int __mutex_fastpath_trylock(atomic_t *count,
					   int (*fail_fn)(atomic_t *))
{
	if (likely(atomic_cmpxchg(count, 1, 0) == 1))
		return 1;
	else
		return 0;
}
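
/*
 * cmpxchg is used here rather than the decrement fastpath because a
 * failed trylock must leave the count untouched: a blind decrement
 * would drive it negative and falsely signal waiters. Since cmpxchg
 * gives an exact answer, fail_fn is never called on x86-64 and exists
 * only to match the generic prototype. Caller sketch (after the
 * pattern in kernel/mutex.c, shown for illustration):
 *
 *	if (__mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath))
 *		return 1;
 */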

#endif /* _ASM_X86_MUTEX_64_H */