/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

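/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */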
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

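/**
 * __mutex_init - initialize the mutex
 * @lock: the mutex to be initialized
 * @name: the name of the mutex (used by lock debugging)
 * @key: the lock_class_key for the class; used by mutex lock debugging
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */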
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);

        debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
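/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure that
 * the branch is predicted by the CPU as default-untaken.
 */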
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

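/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */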
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
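        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */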
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

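/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */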
void __sched mutex_unlock(struct mutex *lock)
{
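        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */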
#ifndef CONFIG_DEBUG_MUTEXES
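        /*
         * When debugging is enabled we must not clear the owner before
         * unlocking. Otherwise the CONFIG_DEBUG_MUTEXES code fails to
         * correctly detect the unlock.
         */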
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

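/*
 * Lock a mutex (possibly interruptible), slowpath:
 */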
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    unsigned long ip)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;

        preempt_disable();
        mutex_acquire(&lock->dep_map, subclass, 0, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
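        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU.
         *
         * The rationale is that if the lock owner is running, it is likely to
         * release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
         * track it non-atomically.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
         * to serialize everything.
         */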
        for (;;) {
                struct thread_info *owner;

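                /*
                 * If we own the BKL, then don't spin: the mutex owner
                 * may itself be blocked on the BKL, which we only
                 * release when we schedule.
                 */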
                if (unlikely(current->lock_depth >= 0))
                        break;

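                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */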
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;

                if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
                        lock_acquired(&lock->dep_map, ip);
                        mutex_set_owner(lock);
                        preempt_enable();
                        return 0;
                }

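                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete.
                 */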
                if (!owner && (need_resched() || rt_task(task)))
                        break;

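                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */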
                arch_mutex_cpu_relax();
        }
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

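        /* add waiting tasks to the end of the waitqueue (FIFO): */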
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        if (atomic_xchg(&lock->count, -1) == 1)
                goto done;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
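                /*
                 * Let's try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters:
                 */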
                if (atomic_xchg(&lock->count, -1) == 1)
                        break;

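                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */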
                if (unlikely(signal_pending_state(state, task))) {
                        mutex_remove_waiter(lock, &waiter,
                                            task_thread_info(task));
                        mutex_release(&lock->dep_map, 1, ip);
                        spin_unlock_mutex(&lock->wait_lock, flags);

                        debug_mutex_free_waiter(&waiter);
                        preempt_enable();
                        return -EINTR;
                }
                __set_task_state(task, state);

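                /* didn't get the lock, go to sleep: */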
                spin_unlock_mutex(&lock->wait_lock, flags);
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                spin_lock_mutex(&lock->wait_lock, flags);
        }

done:
        lock_acquired(&lock->dep_map, ip);
        /* got the lock - rejoice! */
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        mutex_set_owner(lock);

        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        debug_mutex_free_waiter(&waiter);
        preempt_enable();

        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE, subclass, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

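/*
 * Release the lock, slowpath:
 */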
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

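        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */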
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}

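/*
 * Release the lock, slowpath:
 */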
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
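/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */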
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

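/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */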
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_interruptible_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval
                        (&lock->count, __mutex_lock_killable_slowpath);
        if (!ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
}
#endif

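/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */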
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}

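/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */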
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

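/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */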
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);