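/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Also see Documentation/mutex-design.txt.
 */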
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
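
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */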
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
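
/**
 * mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */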
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
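/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure that
 * the branch is predicted by the CPU as default-untaken.
 */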
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
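
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */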
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
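	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */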
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
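
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */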
void __sched mutex_unlock(struct mutex *lock)
{
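	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */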
#ifndef CONFIG_DEBUG_MUTEXES
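	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken, and that clears the owner field
	 * after verifying that it was indeed current.
	 */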
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);
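
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */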
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire(&lock->dep_map, subclass, 0, ip);
#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && \
    !defined(CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES)
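	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */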
	for (;;) {
		struct thread_info *owner;
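
		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */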
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}
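
		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */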
		if (!owner && (need_resched() || rt_task(task)))
			break;
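
		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */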
		cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
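
	/* add waiting tasks to the end of the waitqueue (FIFO): */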
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);
	for (;;) {
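		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */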
		if (atomic_xchg(&lock->count, -1) == 1)
			break;
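
		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */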
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);
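
		/* didn't get the lock, go to sleep: */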
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
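	/* got the lock - rejoice! */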
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);
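
	/* set it to 0 if there are no waiters left: */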
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif
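
/*
 * Release the lock, slowpath:
 */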
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);
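
	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */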
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
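		/* get the first entry from the wait-list: */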
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
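
/*
 * Release the lock, slowpath:
 */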
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
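/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */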
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
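
/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */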
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif
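
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */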
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}
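
	/* Set it back to 0 if there are no waiters: */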
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}
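
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */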
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
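
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */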
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
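	/* dec if we can't possibly hit 0 */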
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
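	/* we might hit 0, so take the lock */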
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
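		/* when we actually did the dec, we didn't hit 0 */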
		mutex_unlock(lock);
		return 0;
	}
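	/* we hit 0, and we hold the lock */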
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);