1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/mutex.h>
16#include <linux/sched.h>
17#include <linux/module.h>
18#include <linux/spinlock.h>
19#include <linux/interrupt.h>
20#include <linux/debug_locks.h>
21
22
23
24
25
26#ifdef CONFIG_DEBUG_MUTEXES
27# include "mutex-debug.h"
28# include <asm-generic/mutex-null.h>
29#else
30# include "mutex.h"
31# include <asm/mutex.h>
32#endif
33
34
35
36
37
38
39
40
41
42void
43__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
44{
45 atomic_set(&lock->count, 1);
46 spin_lock_init(&lock->wait_lock);
47 INIT_LIST_HEAD(&lock->wait_list);
48
49 debug_mutex_init(lock, name, key);
50}
51
52EXPORT_SYMBOL(__mutex_init);
53
54#ifndef CONFIG_DEBUG_LOCK_ALLOC
55
56
57
58
59
60
61static void fastcall noinline __sched
62__mutex_lock_slowpath(atomic_t *lock_count);
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available, the task sleeps until it is (hence might_sleep(): this
 * must not be called from atomic/interrupt context).
 */
void inline fastcall __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition of ->count; on
	 * failure the arch helper falls back to __mutex_lock_slowpath().
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
}

EXPORT_SYMBOL(mutex_lock);
96#endif
97
98static void fastcall noinline __sched
99__mutex_unlock_slowpath(atomic_t *lock_count);
100
101
102
103
104
105
106
107
108
109
110
111
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex previously locked by this task. Unlocking a mutex
 * that is not held is not allowed (the debug code checks for this).
 */
void fastcall __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition of ->count; on
	 * failure the arch helper falls back to __mutex_unlock_slowpath(),
	 * which wakes up a waiter if there is one.
	 */
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
122
123
124
125
/*
 * __mutex_lock_common - contended lock acquisition, common core
 * @lock:     the mutex to acquire
 * @state:    task state to sleep in (TASK_UNINTERRUPTIBLE or
 *            TASK_INTERRUPTIBLE)
 * @subclass: lockdep subclass, passed to mutex_acquire()
 * @ip:       caller's instruction pointer, for lockdep/debug reports
 *
 * Returns 0 once the lock is held, or -EINTR if @state is
 * TASK_INTERRUPTIBLE and a signal arrived while waiting.
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned int old_val;
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	/*
	 * Mark the mutex contended (-1); if it was unlocked (old value 1)
	 * we now own it and can skip the wait loop entirely.
	 */
	old_val = atomic_xchg(&lock->count, -1);
	if (old_val == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Try to take the lock again. We xchg it to -1 (not 0), so
		 * that when the holder releases it, the slowpath is taken
		 * and we (or another waiter) get properly woken up.
		 */
		old_val = atomic_xchg(&lock->count, -1);
		if (old_val == 1)
			break;

		/*
		 * Got a signal? Only bail out for interruptible sleeps;
		 * the waiter must be dequeued and lockdep informed before
		 * dropping the wait_lock.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE &&
					signal_pending(task))) {
			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map);
	/* got the lock - remove ourselves from the wait list: */
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	debug_mutex_set_owner(lock, task_thread_info(task));

	/* set it to 0 (locked, no waiters) if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);

	return 0;
}
202
203#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * mutex_lock_nested - acquire the mutex with a lockdep subclass
 * @lock:     the mutex to be acquired
 * @subclass: lockdep subclass annotating this nesting level
 *
 * Uninterruptible acquire; the subclass is forwarded to the lockdep
 * annotation in __mutex_lock_common().
 */
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
212
/*
 * mutex_lock_interruptible_nested - interruptible acquire, with subclass
 * @lock:     the mutex to be acquired
 * @subclass: lockdep subclass annotating this nesting level
 *
 * Returns 0 on success, or -EINTR if a signal arrived while waiting
 * (see __mutex_lock_common()).
 */
int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
221#endif
222
223
224
225
/*
 * __mutex_unlock_common_slowpath - release the lock, common slowpath
 * @lock_count: pointer to the mutex's ->count; container_of() recovers
 *              the mutex itself
 * @nested:     forwarded to mutex_release() for lockdep
 *
 * Marks the mutex unlocked and wakes up the first waiter, if any.
 * Everything runs under ->wait_lock.
 */
static fastcall inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath
	 * failure case, others need to leave it locked. In the latter
	 * case we have to unlock it here - the counter is currently 0
	 * or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list (FIFO order): */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	debug_mutex_clear_owner(lock);

	spin_unlock_mutex(&lock->wait_lock, flags);
}
259
260
261
262
/*
 * Release the lock, slowpath: entered from the unlock fastpath when the
 * direct 0->1 transition cannot be done (see __mutex_fastpath_unlock()).
 */
static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}
268
269#ifndef CONFIG_DEBUG_LOCK_ALLOC
270
271
272
273
274static int fastcall noinline __sched
275__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
276
277
278
279
280
281
282
283
284
285
286
287
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(); returns 0 once the mutex has been
 * acquired, or -EINTR if a signal arrived while sleeping for the lock
 * (via __mutex_lock_interruptible_slowpath()).
 */
int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();
	return __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
296
/*
 * Lock-acquire slowpath, uninterruptible variant: target of the
 * fastpath fallback in mutex_lock().
 */
static void fastcall noinline __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}
304
/*
 * Lock-acquire slowpath, interruptible variant: target of the fastpath
 * fallback in mutex_lock_interruptible(). Returns 0 or -EINTR.
 */
static int fastcall noinline __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
312#endif
313
314
315
316
317
/*
 * Trylock slowpath: atomically grab the lock if it is available,
 * without ever sleeping. Returns 1 if the mutex was acquired, 0 on
 * contention.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	/* xchg to -1; a previous value of 1 means we got the lock: */
	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		debug_mutex_set_owner(lock, current_thread_info());
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* undo the -1 marking if there are no waiters to wake: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Returns 1 if the mutex has been acquired successfully, and 0 on
 * contention.
 *
 * NOTE: this follows the spin_trylock() convention - the return value
 * is inverted relative to down_trylock() - so be careful when
 * converting semaphore users to mutexes.
 */
int fastcall __sched mutex_trylock(struct mutex *lock)
{
	return __mutex_fastpath_trylock(&lock->count,
					__mutex_trylock_slowpath);
}

EXPORT_SYMBOL(mutex_trylock);
361