/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the debug case the architecture fastpath is replaced by the "null"
 * fastpath, which forces every call into the slowpath so the debug
 * checks always run.
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are currently sleeping
 * on the mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
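
/*
 * Illustrative usage (not part of this file; example_lock is a placeholder
 * name): callers normally go through the mutex_init() wrapper or define a
 * mutex statically rather than calling __mutex_init() directly, e.g.
 *
 *	static DEFINE_MUTEX(example_lock);
 *
 * or, for a dynamically allocated structure:
 *
 *	mutex_init(&obj->lock);
 */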

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * The mutex lock/unlock logic is split into separate fastpath and
 * slowpath functions, to reduce register pressure on the fastpath.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, sleep until it can be acquired.
 *
 * The mutex must later be released by the same task that acquired it;
 * recursive locking is not allowed. This function is not interruptible
 * and may not fail.
 *
 * (With CONFIG_DEBUG_MUTEXES these restrictions are enforced and
 * deadlock debugging is performed.)
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
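
/*
 * Illustrative lock/unlock pairing (not part of this file; obj is a
 * placeholder):
 *
 *	mutex_lock(&obj->lock);
 *	... critical section ...
 *	mutex_unlock(&obj->lock);
 */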

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * To avoid a stampede of spinners all hammering the mutex cacheline at
 * once, would-be spinners first queue up on a simple MCS-style spinlock
 * and only the head of the queue spins on the mutex owner.
 *
 * mspin_lock() is kept out of line so profiling can attribute the time
 * spent spinning here.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define	MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}

/*
 * Helpers for optimistic spinning on the mutex owner.
 */
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference only _after_ checking
	 * that lock->owner still matches owner. If that check fails, owner
	 * might point to freed memory; if it still matches, the
	 * rcu_read_lock() in the caller keeps the memory valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and is
 * not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() or when the
	 * owner changed, which is a sign of heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before
	 * unlocking the mutex. Otherwise the debug code would mistakenly
	 * warn about unlocking a mutex that is not locked; in the debug
	 * case the owner is cleared in the debug unlock path instead.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that the lock owner
	 * is currently running on a (different) CPU. The rationale is that
	 * if the owner is running, it is likely to release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, the owner
	 * is tracked non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on
	 * wait_lock to serialize everything.
	 *
	 * The spinners are queued up using the MCS lock above so that only
	 * one spinner competes for the mutex at a time. If spinning isn't
	 * going to happen anyway, skip the MCS lock/unlock overhead.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
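	/*
	 * We did not get the lock by spinning (or spinning is not
	 * configured/possible): fall back to the sleeping slowpath under
	 * wait_lock.
	 */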
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * Got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have
	 * to unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock() and return 0 once the mutex has been
 * acquired, sleeping until it becomes available if necessary. If a
 * signal arrives while waiting for the lock, this function returns -EINTR.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}

EXPORT_SYMBOL(mutex_lock_interruptible);

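/*
 * mutex_lock_killable - acquire the mutex, interruptible only by fatal signals
 *
 * Like mutex_lock_interruptible(), but the sleep can only be interrupted
 * by a fatal (SIGKILL) signal, in which case -EINTR is returned.
 */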
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * its return value is the opposite of down_trylock(). Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
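
/*
 * Illustrative, hypothetical use of mutex_trylock() (not part of this file;
 * obj is a placeholder):
 *
 *	if (mutex_trylock(&obj->lock)) {
 *		... work that must not block on the lock ...
 *		mutex_unlock(&obj->lock);
 *	}
 */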

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Returns true and holds lock if we dec to 0, returns false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
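
/*
 * Illustrative, hypothetical use of atomic_dec_and_mutex_lock() for a
 * refcounted object (obj and its fields are placeholders, not part of
 * this file):
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock)) {
 *		list_del(&obj->node);		(last reference dropped)
 *		mutex_unlock(&obj->lock);
 *		kfree(obj);
 *	}
 */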