// SPDX-License-Identifier: GPL-2.0
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.)
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
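
/*
 * Usage sketch (illustrative; 'my_lock' and 'my_dev' are made-up names):
 * a mutex is either defined statically or initialized at runtime before
 * first use, never by memset()-ing it to 0.
 *
 *	static DEFINE_MUTEX(my_lock);		// build-time initialization
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *	mutex_init(&my_dev_instance->lock);	// run-time initialization
 */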

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned to at
 * least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}
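
/*
 * Worked example (illustrative; made-up address): task_struct pointers are
 * aligned to at least L1_CACHE_BYTES, so with an owner word of
 * 0xffff888012345603:
 *
 *	__owner_task()  -> 0xffff888012345600	(task pointer, low bits masked)
 *	__owner_flags() -> 0x03			(MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF)
 */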

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list; set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
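
/*
 * Usage sketch (illustrative; 'my_lock' is a made-up name):
 *
 *	mutex_lock(&my_lock);
 *	... critical section, may sleep ...
 *	mutex_unlock(&my_lock);
 */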

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */

/*
 * Associate the ww_mutex @ww with the context @ww_ctx under which we acquired
 * it.
 */
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to
		 * acquire a different ww_mutex? Bad!
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b and depending on algorithm either needs to wait for
 * @b or die.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	/* The signed difference is safe against stamp counter wraparound. */
	return (signed long)(a->stamp - b->stamp) > 0;
}
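
/*
 * Worked example (illustrative): stamps come from a wrapping counter, so the
 * comparison is done on the signed difference. With 8-bit stamps for brevity,
 * a->stamp == 2 and b->stamp == 254 give (2 - 254) & 0xff == 4, i.e. +4 when
 * interpreted as signed, so @a is correctly seen as younger than @b even
 * though its raw stamp value is smaller.
 */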

/*
 * Wait-Die; wake a younger waiter context (when locks held) such that it can
 * die.
 *
 * Among waiters with context, only the first one can have other locks acquired
 * already (ctx->acquired > 0), because __ww_mutex_add_waiter() and
 * __ww_mutex_check_kill() wake any but the earliest context.
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Possible through __ww_mutex_add_waiter() when we race with
	 * ww_mutex_set_context_fastpath(). In that case we'll get here again
	 * through __ww_mutex_check_waiters().
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have a reference to it.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state()
		 * inserts sufficient barriers to make sure @owner either sees
		 * it's wounded in __ww_mutex_check_kill() or has a
		 * wakeup pending to re-read the wounded state.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring the lock with fastpath, where we do not hold wait_lock, set
 * ctx and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * This memory barrier pairs with the one in __ww_mutex_add_waiter()
	 * and makes sure we either observe ww->ctx or MUTEX_FLAG_WAITERS.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Skip spinning if the lock holder is not running on a CPU or if
	 * its CPU has been preempted (lock holder preemption).
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to participate in the wait-queue, we attempt
		 * to acquire the lock directly if the mutex is unlocked.
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
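
/*
 * Usage sketch (illustrative; 'a', 'b' and 'my_class' are made-up names and
 * error unwinding is elided; see the W/W mutex design document under
 * Documentation/locking/ for the complete pattern):
 *
 *	struct ww_acquire_ctx ctx;
 *
 *	ww_acquire_init(&ctx, &my_class);
 *
 *	ret = ww_mutex_lock(&a->lock, &ctx);	// 0, -EDEADLK or -EALREADY
 *	ret = ww_mutex_lock(&b->lock, &ctx);	// on -EDEADLK: unlock all
 *						// held locks and retry
 *	ww_acquire_done(&ctx);
 *	... critical section ...
 *	ww_mutex_unlock(&b->lock);
 *	ww_mutex_unlock(&a->lock);
 *	ww_acquire_fini(&ctx);
 */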

static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first, such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting, and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving
	 * them. Wait-Die waiters may die here. Wound-Wait waiters
	 * never die here, but they are sorted in stamp order and
	 * may wound the lock holder.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting, there
			 * is no point in queueing behind it, as we'd have to
			 * die the moment it would acquire the lock.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might proceed.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (use_ww_ctx && ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2; /* back off: grow the interval ~3.5x */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock().  While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
#endif

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
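
/*
 * Usage sketch (illustrative; 'my_lock' is a made-up name) - note the
 * spin_trylock()-style return convention:
 *
 *	if (mutex_trylock(&my_lock)) {
 *		... lock held, do the work ...
 *		mutex_unlock(&my_lock);
 *	} else {
 *		... contended, fall back or retry later ...
 *	}
 */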

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
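
/*
 * Usage sketch (illustrative; 'obj', 'refs' and 'my_list_lock' are made-up):
 * drop a reference and take the mutex only when the count hits zero, e.g.
 * for teardown of a refcounted object on a mutex-protected list.
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refs, &my_list_lock)) {
 *		// count reached 0 with the mutex held
 *		list_del(&obj->node);
 *		mutex_unlock(&my_list_lock);
 *		kfree(obj);
 *	}
 */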