/*
 * Mutexes: blocking mutual exclusion locks
 *
 * This file contains the main data structure and API definitions.
 */
#ifndef __LINUX_MUTEX_H
#define __LINUX_MUTEX_H

#include <linux/list.h>
#include <linux/spinlock_types.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>

#include <asm/atomic.h>

/*
 * Simple, straightforward mutexes with strict semantics:
 *
 * - only one task can hold the mutex at a time
 * - only the owner can unlock the mutex
 * - multiple unlocks are not permitted
 * - recursive locking is not permitted
 * - a mutex object must be initialized via the API
 * - a mutex object must not be initialized via memset or copying
 * - task may not exit with mutex held
 * - memory areas where held locks reside must not be freed
 * - held mutexes must not be reinitialized
 * - mutexes may not be used in hardware or software interrupt
 *   contexts such as tasklets and timers
 *
 * These semantics are fully enforced when DEBUG_MUTEXES is enabled.
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	spinlock_t		wait_lock;
	struct list_head	wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
	struct task_struct	*owner;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
	const char		*name;
	void			*magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
#endif
};

/*
 * This is the control structure for tasks blocked on a mutex,
 * which resides on the blocked task's kernel stack:
 */
struct mutex_waiter {
	struct list_head	list;
	struct task_struct	*task;
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;
#endif
};

#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to the unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
# define mutex_init(mutex)						\
do {									\
	static struct lock_class_key __key;				\
									\
	__mutex_init((mutex), #mutex, &__key);				\
} while (0)
# define mutex_destroy(mutex)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)				\
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

#define __MUTEX_INITIALIZER(lockname)					\
		{ .count = ATOMIC_INIT(1)				\
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock)	\
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list)	\
		__DEBUG_MUTEX_INITIALIZER(lockname)			\
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);
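
/*
 * Illustrative usage sketch, not part of the API proper (my_mutex, my_dev
 * and dev are hypothetical names used only for this example):
 *
 *	static DEFINE_MUTEX(my_mutex);	// statically initialized, unlocked
 *
 * For a mutex embedded in a dynamically allocated object:
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	mutex_init(&dev->lock);
 *	mutex_lock(&dev->lock);
 *	// ... critical section ...
 *	mutex_unlock(&dev->lock);
 */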

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 */
static inline int mutex_is_locked(struct mutex *lock)
{
	return atomic_read(&lock->count) != 1;
}

/*
 * See kernel/mutex.c for detailed documentation of these APIs.
 * Also see Documentation/mutex-design.txt.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
					unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)

#define mutex_lock_nest_lock(lock, nest_lock)				\
do {									\
	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
} while (0)

#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
#endif
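
/*
 * Illustrative sketch of lockdep-annotated nested locking between two
 * locks of the same lock class (parent and child are hypothetical
 * objects; SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>):
 *
 *	mutex_lock(&parent->lock);
 *	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *	// ... operate on both objects ...
 *	mutex_unlock(&child->lock);
 *	mutex_unlock(&parent->lock);
 */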

/*
 * NOTE: mutex_trylock() follows the spin_trylock() convention,
 *       not the down_trylock() convention!
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on contention.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
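
/*
 * Illustrative refcount-teardown sketch for atomic_dec_and_mutex_lock():
 * it decrements @cnt and, only if the count reaches zero, acquires @lock
 * and returns 1 holding it (obj, obj_list_lock and the fields shown are
 * hypothetical):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj_list_lock)) {
 *		// last reference dropped; obj_list_lock is now held
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */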

#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax()	cpu_relax()
#endif

#endif