/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks. Only one task may hold a
 * mutex at a time, and only the owner may unlock it. This file also
 * contains the wait/wound mutex (ww_mutex) machinery used to acquire
 * multiple locks of the same class while avoiding deadlock.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
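/*
 * Note for callers: __mutex_init() is normally not used directly.
 * Statically allocated mutexes are defined with DEFINE_MUTEX(), and
 * dynamically allocated ones are set up with the mutex_init() wrapper,
 * which supplies the lock class key for lockdep. A minimal sketch
 * (struct foo / foo_setup / static_lock are illustrative names):
 *
 *	static DEFINE_MUTEX(static_lock);
 *
 *	struct foo {
 *		struct mutex lock;
 *	};
 *
 *	static void foo_setup(struct foo *f)
 *	{
 *		mutex_init(&f->lock);
 *	}
 */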
/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * We set the HANDOFF bit, we must make sure it doesn't live
		 * past the point where we acquire it. This would be possible
		 * if we (accidentally) set the bit on an unlocked mutex.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 */

/*
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void __sched
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
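/*
 * Typical usage is a plain lock/unlock pair around the critical section,
 * in process context only. A minimal sketch (cache_lock, cache_list and
 * cache_add are illustrative names):
 *
 *	static DEFINE_MUTEX(cache_lock);
 *	static LIST_HEAD(cache_list);
 *
 *	void cache_add(struct list_head *entry)
 *	{
 *		mutex_lock(&cache_lock);
 *		list_add(entry, &cache_list);
 *		mutex_unlock(&cache_lock);
 *	}
 */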

/*
 * Wait-Die:
 *   The newer transactions are killed when:
 *     It (the new transaction) makes a request for a lock being held
 *     by an older transaction.
 *
 * Wound-Wait:
 *   The newer transactions are wounded when:
 *     An older transaction makes a request for a lock being held by
 *     the newer transaction.
 */
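/*
 * For reference, the caller-side pattern built on top of this machinery
 * looks roughly as follows; a hedged sketch only (a, b and my_ww_class
 * are illustrative, the class would be defined with DEFINE_WW_CLASS();
 * see ww_mutex.h and Documentation/locking/ww-mutex-design.rst for the
 * authoritative description):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &my_ww_class);
 *
 *	ww_mutex_lock(&a->lock, &ctx);	// first lock in a fresh context
 *					// cannot return -EDEADLK
 *	for (;;) {
 *		ret = ww_mutex_lock(&b->lock, &ctx);
 *		if (ret != -EDEADLK)
 *			break;
 *		// we are the younger context: back off, sleep on the
 *		// contended lock, then retry in the opposite order
 *		ww_mutex_unlock(&a->lock);
 *		ww_mutex_lock_slow(&b->lock, &ctx);
 *		swap(a, b);
 *	}
 *	ww_acquire_done(&ctx);
 *	// ... both locks held ...
 *	ww_mutex_unlock(&a->lock);
 *	ww_mutex_unlock(&b->lock);
 *	ww_acquire_fini(&ctx);
 */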
static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to acquire a different lock than
		 * the one you backed off from.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * The retry after -EDEADLK must start with no other locks
		 * held in this context.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
	ww->ctx = ww_ctx;
}

/*
 * Determine if context @a is 'after' context @b. IOW, @a is a younger
 * transaction than @b.
 */
static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{

	return (signed long)(a->stamp - b->stamp) > 0;
}

/*
 * Wait-Die: wake the younger waiter @waiter so it can back off ("die")
 * when it already holds other locks and would otherwise keep waiting
 * behind the older context @ww_ctx.
 *
 * Returns true if the wait-die algorithm is in use, in which case the
 * caller can stop scanning the wait-list (only the first context waiter
 * can hold other locks).
 */
static bool __sched
__ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter,
	       struct ww_acquire_ctx *ww_ctx)
{
	if (!ww_ctx->is_wait_die)
		return false;

	if (waiter->ww_ctx->acquired > 0 &&
	    __ww_ctx_stamp_after(waiter->ww_ctx, ww_ctx)) {
		debug_mutex_wake_waiter(lock, waiter);
		wake_up_process(waiter->task);
	}

	return true;
}

/*
 * Wound-Wait; wound a younger @hold_ctx if it holds the lock.
 *
 * Wound the lock holder if there are waiters with older transactions than
 * the lock holder. Even if multiple waiters may wound the lock holder,
 * it's sufficient that only one does it, the transaction is already
 * wounded and scheduled to back off.
 */
static bool __ww_mutex_wound(struct mutex *lock,
			     struct ww_acquire_ctx *ww_ctx,
			     struct ww_acquire_ctx *hold_ctx)
{
	struct task_struct *owner = __mutex_owner(lock);

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * The lock may be held without a context (or the fastpath owner may
	 * not have published its context yet); in that case there is nobody
	 * to wound.
	 */
	if (!hold_ctx)
		return false;

	/*
	 * Can have !owner because of __mutex_unlock_slowpath(), but if owner,
	 * it cannot go away because we'll have FLAG_WAITERS set and hold
	 * wait_lock.
	 */
	if (!owner)
		return false;

	if (ww_ctx->acquired > 0 && __ww_ctx_stamp_after(hold_ctx, ww_ctx)) {
		hold_ctx->wounded = 1;

		/*
		 * wake_up_process() paired with set_current_state() inserts
		 * sufficient barriers to make sure @owner either sees it's
		 * wounded in __ww_mutex_check_kill() or has a wakeup pending
		 * to re-check the wounded condition.
		 */
		if (owner != current)
			wake_up_process(owner);

		return true;
	}

	return false;
}

/*
 * We just acquired @lock under @ww_ctx, if there are later contexts waiting
 * behind us on the wait-list, check if they need to die, or wound us.
 *
 * See __ww_mutex_add_waiter() for the list-order construction; basically the
 * list is ordered by stamp, smallest (oldest) first.
 *
 * This relies on never mixing wait-die/wound-wait on the same wait-list;
 * which is currently ensured by that being a ww_class property.
 *
 * The current task must not be on the wait list.
 */
static void __sched
__ww_mutex_check_waiters(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_mutex_die(lock, cur, ww_ctx) ||
		    __ww_mutex_wound(lock, cur->ww_ctx, ww_ctx))
			break;
	}
}

/*
 * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
 * and wake up any waiters so they can recheck.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the WAITERS check is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or it will acquire wait_lock, add itself
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* See comments above and below. */

	/*
	 * [W] ww->ctx = ctx	    [W] MUTEX_FLAG_WAITERS
	 *     MB		        MB
	 * [R] MUTEX_FLAG_WAITERS   [R] ww->ctx
	 *
	 * The memory barrier above pairs with the memory barrier in
	 * __ww_mutex_add_waiter() and makes sure we either observe ww->ctx
	 * and/or !empty list.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * Uh oh, we raced in fastpath, check if any of the waiters need to
	 * die or wound us.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_check_waiters(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * if there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Don't spin if the owner is not currently running on a CPU, or if
	 * the CPU it runs on is preempted (a vCPU issue on virtual hosts).
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spinning loop, which is a faster
	 * option than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * Undo the acquire-context accounting before releasing the
	 * underlying mutex.
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

/*
 * Back off: when the context already holds other locks (acquired > 0),
 * return -EDEADLK so the caller unwinds; under CONFIG_DEBUG_MUTEXES also
 * record the contended ww_mutex for later sanity checks.
 */
static __always_inline int __sched
__ww_mutex_kill(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
		struct ww_mutex *ww;

		ww = container_of(lock, struct ww_mutex, base);
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
		ww_ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Check the wound condition for the current lock acquire.
 *
 * Wound-Wait: If we're wounded, kill ourself.
 *
 * Wait-Die: If we're trying to acquire a lock already held by an older
 *           context, kill ourselves.
 *
 * Since __ww_mutex_add_waiter() orders the wait-list on stamp, we only have to
 * look at waiters before us in the wait-list.
 */
static inline int __sched
__ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter,
		      struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (ctx->acquired == 0)
		return 0;

	if (!ctx->is_wait_die) {
		if (ctx->wounded)
			return __ww_mutex_kill(lock, ctx);

		return 0;
	}

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		return __ww_mutex_kill(lock, ctx);

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must kill ourself.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		return __ww_mutex_kill(lock, ctx);
	}

	return 0;
}

/*
 * Add @waiter to the wait-list, keep the wait-list ordered by stamp, smallest
 * first. Such that older contexts are preferred to acquire the lock over
 * younger contexts.
 *
 * Waiters without context are interspersed in FIFO order.
 *
 * Furthermore, for Wait-Die kill ourself immediately when possible (there are
 * older contexts already waiting) to avoid unnecessary waiting and for
 * Wound-Wait ensure we wound the owning context when it is younger.
 */
static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;
	bool is_wait_die;

	if (!ww_ctx) {
		__mutex_add_waiter(lock, waiter, &lock->wait_list);
		return 0;
	}

	is_wait_die = ww_ctx->is_wait_die;

	/*
	 * Find the insertion point: keep the list ordered by stamp (oldest
	 * first) by inserting before the first waiter with a later stamp.
	 * Waiters without a context keep their FIFO position.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/*
			 * Wait-Die: if we find an older context waiting in
			 * front of us, there is no point in queueing behind
			 * it: we would have to die the moment it acquires
			 * the lock anyway.
			 */
			if (is_wait_die) {
				int ret = __ww_mutex_kill(lock, ww_ctx);

				if (ret)
					return ret;
			}

			break;
		}

		pos = &cur->list;

		/* Wait-Die: ensure younger waiters die. */
		__ww_mutex_die(lock, cur, ww_ctx);
	}

	__mutex_add_waiter(lock, waiter, pos);

	/*
	 * Wound-Wait: if we're blocking on a mutex owned by a younger context,
	 * wound that such that we might make progress.
	 */
	if (!is_wait_die) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);

		/*
		 * See ww_mutex_set_context_fastpath(). Orders setting
		 * MUTEX_FLAG_WAITERS vs the ww->ctx load,
		 * such that either we or the fastpath will wound @ww->ctx.
		 */
		smp_mb();
		__ww_mutex_wound(lock, ww_ctx, ww->ctx);
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);


#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING (fall through above), or we must see
		 * its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (use_ww_ctx && ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_kill:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, periodically fail an acquisition
 * with -EDEADLK (after dropping @lock) so that the caller's backoff and
 * slowpath handling gets exercised; the injection interval grows roughly
 * 3.5x each time.
 */
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - acquire the mutex, interruptible by signals
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
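/*
 * A caller of the interruptible/killable variants must handle the error
 * return; a rough sketch of the common idiom (dev / dev->lock are
 * illustrative names):
 *
 *	if (mutex_lock_interruptible(&dev->lock))
 *		return -ERESTARTSYS;
 *	// ... critical section ...
 *	mutex_unlock(&dev->lock);
 */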

/**
 * mutex_lock_killable() - acquire the mutex, interruptible by fatal signals
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
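/*
 * Note the spin_trylock()-style return convention: non-zero means the
 * lock was taken. A rough sketch (stats_lock and update_stats are
 * illustrative names):
 *
 *	if (mutex_trylock(&stats_lock)) {
 *		update_stats();
 *		mutex_unlock(&stats_lock);
 *	} else {
 *		// somebody else holds the lock; skip this round
 *	}
 */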

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
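/*
 * The usual caller is a release path that must tear down an object under
 * a mutex only when the last reference goes away; a rough sketch
 * (obj, refcnt, obj_lock and free_obj are illustrative names):
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_mutex_lock(&o->refcnt, &obj_lock)) {
 *			list_del(&o->node);
 *			mutex_unlock(&obj_lock);
 *			free_obj(o);
 *		}
 *	}
 */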