1
2
3
4
5
6
7
8
9
10#ifndef __LINUX_MUTEX_H
11#define __LINUX_MUTEX_H
12
13#include <linux/list.h>
14#include <linux/spinlock_types.h>
15#include <linux/linkage.h>
16#include <linux/lockdep.h>
17
18#include <asm/atomic.h>
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
/*
 * Simple, straightforward mutex with a strict semantics:
 * only one task can hold the mutex at a time.
 */
struct mutex {
	/* 1: unlocked, 0: locked, negative: locked, possible waiters */
	atomic_t		count;
	/* protects wait_list below */
	spinlock_t		wait_lock;
	/* list of struct mutex_waiter entries blocked on this mutex */
	struct list_head	wait_list;
#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
	/* current owner; tracked for debugging and (on SMP) spin-on-owner */
	struct thread_info	*owner;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
	const char 		*name;
	void			*magic;	/* debug sanity-check cookie */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* lockdep dependency tracking */
#endif
};
64
65
66
67
68
/*
 * This is the control structure for tasks blocked on mutex;
 * one entry is queued on mutex->wait_list per blocked task.
 */
struct mutex_waiter {
	struct list_head	list;	/* links into mutex->wait_list */
	struct task_struct	*task;	/* the blocked task, to be woken on unlock */
#ifdef CONFIG_DEBUG_MUTEXES
	void			*magic;	/* debug sanity-check cookie */
#endif
};
76
#ifdef CONFIG_DEBUG_MUTEXES
# include <linux/mutex-debug.h>
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 *
 * The static __key gives each mutex_init() call site its own lock
 * class for lockdep (passed to __mutex_init() along with #mutex as
 * the name).
 */
# define mutex_init(mutex) \
do { \
	static struct lock_class_key __key; \
	\
	__mutex_init((mutex), #mutex, &__key); \
} while (0)
/* no-op here; the debug variant comes from <linux/mutex-debug.h> above */
# define mutex_destroy(mutex) do { } while (0)
#endif
97
/* Expands to a dep_map initializer only when lockdep is configured in. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
		, .dep_map = { .name = #lockname }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif

/*
 * Static initializer: count = 1 (unlocked), with the debug and lockdep
 * parts folded in only when the corresponding config options are set.
 */
#define __MUTEX_INITIALIZER(lockname) \
		{ .count = ATOMIC_INIT(1) \
		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
		, .wait_list = LIST_HEAD_INIT(lockname.wait_list) \
		__DEBUG_MUTEX_INITIALIZER(lockname) \
		__DEP_MAP_MUTEX_INITIALIZER(lockname) }

/* Define and statically initialize a mutex in unlocked state. */
#define DEFINE_MUTEX(mutexname) \
	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)

/* Runtime initialization; used by mutex_init() above. */
extern void __mutex_init(struct mutex *lock, const char *name,
			 struct lock_class_key *key);
117
118
119
120
121
122
123
124static inline int mutex_is_locked(struct mutex *lock)
125{
126 return atomic_read(&lock->count) != 1;
127}
128
129
130
131
132
/*
 * Lock acquisition API. See kernel/mutex.c for the implementations.
 *
 * With CONFIG_DEBUG_LOCK_ALLOC the _nested() variants are the real
 * entry points (the @subclass distinguishes nesting levels for
 * lockdep), and the plain names map onto them with subclass 0.
 * Without it, the mapping is reversed: the plain functions are real
 * and the _nested() forms discard @subclass.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
					unsigned int subclass);
extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
					unsigned int subclass);

#define mutex_lock(lock) mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock) mutex_lock_interruptible_nested(lock, 0)
#define mutex_lock_killable(lock) mutex_lock_killable_nested(lock, 0)
#else
extern void mutex_lock(struct mutex *lock);
extern int __must_check mutex_lock_interruptible(struct mutex *lock);
extern int __must_check mutex_lock_killable(struct mutex *lock);

# define mutex_lock_nested(lock, subclass) mutex_lock(lock)
# define mutex_lock_interruptible_nested(lock, subclass) mutex_lock_interruptible(lock)
# define mutex_lock_killable_nested(lock, subclass) mutex_lock_killable(lock)
#endif
152
153
154
155
156
157
158
/*
 * NOTE(review): mutex_trylock() presumably follows the spin_trylock()
 * convention (returns 1 on successful acquisition, 0 on contention),
 * not the down_trylock() convention — confirm against kernel/mutex.c.
 */
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
/* Atomically decrement @cnt; if it hit 0, acquire @lock as well. */
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);

/* Fallback spin-wait relax hint when the arch provides no override. */
#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
#define arch_mutex_cpu_relax() cpu_relax()
#endif
166
167#endif
168