/*
 * Mutexes: blocking mutual exclusion locks.
 *
 * This file implements the counter-based mutex fast/slow paths, optional
 * optimistic spinning on the current lock owner (CONFIG_MUTEX_SPIN_ON_OWNER),
 * and the wait/wound (ww) mutex variants used for multi-lock acquisition
 * with deadlock avoidance.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

/*
 * A negative mutex count indicates that waiters are sleeping waiting for
 * the mutex.
 */
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	lock->spin_mlock = NULL;
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);

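/*
 * Usage sketch (illustrative only; the identifiers my_lock and my_update
 * below are hypothetical): a mutex is normally defined statically with
 * DEFINE_MUTEX() or set up at run time with mutex_init(), and then taken
 * around the critical section:
 *
 *	static DEFINE_MUTEX(my_lock);
 *
 *	void my_update(void)
 *	{
 *		mutex_lock(&my_lock);
 *		// ... access data protected by my_lock ...
 *		mutex_unlock(&my_lock);
 *	}
 */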
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure that
 * the branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * In order to avoid a stampede of mutex spinners trying to acquire the
 * mutex more or less simultaneously, the spinners need to take a MCS
 * (queued) lock first before spinning on the owner field.
 *
 * We don't inline mspin_lock() so that perf can correctly account for the
 * time spent in this lock function.
 */
struct mspin_node {
	struct mspin_node *next;
	int		   locked;	/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *prev;

	/* Init node */
	node->locked = 0;
	node->next   = NULL;

	prev = xchg(lock, node);
	if (likely(prev == NULL)) {
		/* Lock acquired */
		node->locked = 1;
		return;
	}
	ACCESS_ONCE(prev->next) = node;
	smp_wmb();
	/* Wait until the lock holder passes the lock down */
	while (!ACCESS_ONCE(node->locked))
		arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
	struct mspin_node *next = ACCESS_ONCE(node->next);

	if (likely(!next)) {
		/*
		 * Release the lock by setting it to NULL
		 */
		if (cmpxchg(lock, node, NULL) == node)
			return;
		/* Wait until the next pointer is set */
		while (!(next = ACCESS_ONCE(node->next)))
			arch_mutex_cpu_relax();
	}
	ACCESS_ONCE(next->locked) = 1;
	smp_wmb();
}
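
/*
 * Hand-off sketch (illustrative): with two contending CPUs, CPU A's xchg()
 * finds the tail pointer NULL and takes the MCS lock immediately; CPU B's
 * xchg() returns A's node, so B links itself behind A and spins on its own
 * node->locked. When A calls mspin_unlock(), it either clears the tail
 * (no successor) or sets B's ->locked to 1, letting exactly one spinner
 * proceed to contend for the mutex itself.
 */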

/*
 * Mutex spinning code migrated from kernel/sched/core.c
 */
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
	if (lock->owner != owner)
		return false;

	/*
	 * Ensure we emit the owner->on_cpu dereference _after_ checking that
	 * lock->owner still matches owner. If that fails, owner might point
	 * to freed memory; if it still matches, the rcu_read_lock() in our
	 * caller ensures the memory stays valid.
	 */
	barrier();

	return owner->on_cpu;
}

/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	rcu_read_lock();
	while (owner_running(lock, owner)) {
		if (need_resched())
			break;

		arch_mutex_cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * We break out of the loop above on need_resched() and when the
	 * owner changed, which is a sign for heavy contention. Return
	 * success only when lock->owner is NULL.
	 */
	return lock->owner == NULL;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	int retval = 1;

	rcu_read_lock();
	if (lock->owner)
		retval = lock->owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has just been released.
	 */
	return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken anyway, which means we'd lock
	 * out the debug code.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of
 * the ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time,
	 * the slow path will always be taken anyway, which means we'd lock
	 * out the debug code.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
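
/*
 * Usage sketch of the wait/wound API implemented here (illustrative; the
 * names obj_a, obj_b and my_class are hypothetical, the functions are the
 * ww_mutex interface declared in <linux/ww_mutex.h>):
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	struct ww_acquire_ctx ctx;
 *	int err;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *	err = ww_mutex_lock(&obj_a->lock, &ctx);
 *	if (!err)
 *		err = ww_mutex_lock(&obj_b->lock, &ctx);
 *	if (err == -EDEADLK) {
 *		// younger context lost: drop what we hold, then sleep on
 *		// the contended lock and retry the acquisition sequence
 *		ww_mutex_unlock(&obj_a->lock);
 *		ww_mutex_lock_slow(&obj_b->lock, &ctx);
 *		// ... retry taking obj_a (which may -EDEADLK again) ...
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... use both objects ...
 *	ww_mutex_unlock(&obj_a->lock);
 *	ww_mutex_unlock(&obj_b->lock);
 *	ww_acquire_fini(&ctx);
 */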

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (unlikely(ctx == hold_ctx))
		return -EALREADY;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}
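
/*
 * Worked example for the stamp check above (illustrative): stamps are
 * assigned by ww_acquire_init() from a per-class counter, so an older
 * context has the smaller stamp. If the current holder has
 * hold_ctx->stamp == 5 and we have ctx->stamp == 9, then
 * ctx->stamp - hold_ctx->stamp == 4 <= LONG_MAX and the stamps differ,
 * so the younger context (us) gets -EDEADLK and must back off. With the
 * stamps reversed the unsigned subtraction wraps to a value > LONG_MAX
 * and the older context is allowed to keep waiting.
 */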

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock in the fastpath, or when we lost out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck whether they have to back off.
 *
 * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
 * as the fastpath and opportunistic spinning are disabled in that case.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in the fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */
	if (!mutex_can_spin_on_owner(lock))
		goto slowpath;

	for (;;) {
		struct task_struct *owner;
		struct mspin_node  node;

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (ACCESS_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		mspin_lock(MLOCK(lock), &node);
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner)) {
			mspin_unlock(MLOCK(lock), &node);
			break;
		}

		if ((atomic_read(&lock->count) == 1) &&
		    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
			lock_acquired(&lock->dep_map, ip);
			if (!__builtin_constant_p(ww_ctx == NULL)) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			mspin_unlock(MLOCK(lock), &node);
			preempt_enable();
			return 0;
		}
		mspin_unlock(MLOCK(lock), &node);

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock the lock owner, so
		 * spin instead as well.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
slowpath:
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (MUTEX_SHOW_NO_WAITER(lock) &&
		    (atomic_xchg(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (!__builtin_constant_p(ww_ctx == NULL) && ww_ctx->acquired > 0) {
			ret = __mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	if (!__builtin_constant_p(ww_ctx == NULL)) {
		struct ww_mutex *ww = container_of(lock,
						   struct ww_mutex,
						   base);
		struct mutex_waiter *cur;

		/*
		 * This branch gets optimized out for the common case,
		 * and is only important for ww_mutex_lock.
		 */
		ww_mutex_lock_acquired(ww, ww_ctx);
		ww->ctx = ww_ctx;

		/*
		 * Give any possible sleeping processes the chance to wake up,
		 * so they can recheck if they have to back off.
		 */
		list_for_each_entry(cur, &lock->wait_list, list) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();

	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}
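
/*
 * Note on the injection interval above: the update tmp = tmp*2 + tmp + tmp/2
 * grows the interval by roughly a factor of 3.5 each time an -EDEADLK is
 * injected (capped at UINT_MAX), so fake deadlocks become progressively
 * rarer while still exercising the -EDEADLK backoff paths under
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH.
 */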

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}

/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
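
/*
 * Usage sketch (illustrative; my_lock and the surrounding code are
 * hypothetical): note the spin_trylock()-style return convention.
 *
 *	if (mutex_trylock(&my_lock)) {
 *		// ... got the lock, do the optional work ...
 *		mutex_unlock(&my_lock);
 *	} else {
 *		// contended: skip or defer the work instead of sleeping
 *	}
 */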

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);

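/*
 * Usage sketch (illustrative; my_refcount, my_lock and __release_resources
 * are hypothetical): the common pattern is dropping the last reference to
 * an object whose teardown must run under a mutex.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->my_refcount, &obj->my_lock)) {
 *		// refcount hit zero and my_lock is held: safe to tear down
 *		__release_resources(obj);
 *		mutex_unlock(&obj->my_lock);
 *	}
 */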