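/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks.
 *
 * This file contains the core mutex implementation: the uncontended
 * fast paths, the contended slow path with optimistic spinning and
 * lock handoff, and the wait/wound (ww) mutex extensions.
 */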
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
#else
# include "mutex.h"
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

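/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers have at least word
 * alignment, the low bits are free to store extra state:
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */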
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

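/*
 * Trylock variant that returns the owning task on failure.
 */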
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long old, flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (likely(task != curr))
				break;

			if (likely(!(flags & MUTEX_FLAG_PICKUP)))
				break;

			flags &= ~MUTEX_FLAG_PICKUP;
		} else {
#ifdef CONFIG_DEBUG_MUTEXES
			DEBUG_LOCKS_WARN_ON(flags & MUTEX_FLAG_PICKUP);
#endif
		}

		/*
		 * Clear HANDOFF on acquisition so the flag cannot leak past
		 * the point where the lock is actually taken.
		 */
		flags &= ~MUTEX_FLAG_HANDOFF;

		old = atomic_long_cmpxchg_acquire(&lock->owner, owner, curr | flags);
		if (old == owner)
			return NULL;

		owner = old;
	}

	return __owner_task(owner);
}

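/*
 * Actual trylock that will work on any unlocked state.
 */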
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_or_owner(lock);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
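/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations outwards
 * except more code.
 *
 * Optimistic trylock that only works in the uncontended case. Make sure to
 * follow with a __mutex_trylock() before failing.
 */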
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (!atomic_long_cmpxchg_acquire(&lock->owner, 0UL, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	if (atomic_long_cmpxchg_release(&lock->owner, curr, 0UL) == curr)
		return true;

	return false;
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

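/*
 * Give up ownership to a specific task; when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock; the matching
 * ACQUIRE for the handoff is provided by __mutex_trylock().
 */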
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long old, new;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		old = atomic_long_cmpxchg_release(&lock->owner, owner, new);
		if (old == owner)
			break;

		owner = old;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);
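/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not available
 * right now, sleep until it can be acquired.
 *
 * The mutex must later be released by the same task that acquired it;
 * recursive locking is not allowed. The task may not exit without first
 * unlocking the mutex, and the mutex must be initialized before it can be
 * locked.
 */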
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

static __always_inline void
ww_mutex_lock_acquired(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call path.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to acquire a different lock than
		 * the one that caused the backoff.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK, but
		 * 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Using a different ww_class leads to undefined behavior.
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

static inline bool __sched
__ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
{
	return a->stamp - b->stamp <= LONG_MAX &&
	       (a->stamp != b->stamp || a > b);
}

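/*
 * Wake up any waiters that may need to back off after we acquired the lock
 * with a context: a waiter that holds other locks and has a later (younger)
 * stamp must be woken so it can recheck and release its locks (wound/wait
 * backoff). The wait list is stamp ordered, so only the first waiter that
 * carries a context needs to be considered.
 */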
static void __sched
__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;

	lockdep_assert_held(&lock->wait_lock);

	list_for_each_entry(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (cur->ww_ctx->acquired > 0 &&
		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}

		break;
	}
}

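/*
 * After acquiring the lock with the fastpath, where we do not hold
 * wait_lock, set ctx and wake up any waiters so they can recheck.
 */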
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * Make the lock->ctx update visible before checking for waiters;
	 * a contending waiter that observes the lock as owned must also
	 * be able to observe lock->ctx, otherwise its backoff check could
	 * be missed.
	 */
	smp_mb();

	/*
	 * If the lock is not contended, there is nobody to wake up.
	 */
	if (likely(!(atomic_long_read(&lock->base.owner) & MUTEX_FLAG_WAITERS)))
		return;

	/*
	 * We raced against waiters in the fastpath; check whether any of
	 * them need to back off.
	 */
	spin_lock(&lock->base.wait_lock);
	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
	spin_unlock(&lock->base.wait_lock);
}

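/*
 * After acquiring the lock in the slowpath, set ctx.
 *
 * Unlike the fastpath, the caller ensures that waiters are woken up where
 * necessary.
 *
 * Callers must hold the mutex wait_lock.
 */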
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set, its contents are undefined; only by holding
	 * wait_lock is there a guarantee they are valid when read. As such,
	 * when deadlock detection needs to be performed, optimistic spinning
	 * cannot be done.
	 *
	 * Check this in every inner iteration because we may be racing
	 * against another thread's ww_mutex_lock().
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin if there are
	 * waiters. We want to avoid stealing the lock from a waiter with an
	 * earlier stamp, since the other thread may already own a lock that
	 * we also need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

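/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */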
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted() to detect lock holder preemption.
		 */
		if (!owner->on_cpu || need_resched() ||
				vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

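/*
 * Initial check for entering the mutex spinning loop.
 */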
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * Skip spinning if the owner is not running on a CPU, or its CPU is
	 * preempted (lock holder preemption).
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * so that we'll trylock in the spin path, which is faster than the
	 * blocking slow path.
	 */
	return retval;
}

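/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using an MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */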
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() check is to
		 * eliminate the overhead of osq_lock() and osq_unlock() in
		 * case spinning isn't possible. A waiter-spinner is not
		 * going to take the OSQ lock anyway, so there is no need to
		 * call mutex_can_spin_on_owner() for it.
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;


fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      const bool use_ww_ctx, struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);
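/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 */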
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

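/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */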
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
			    struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
	struct mutex_waiter *cur;

	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
		goto deadlock;

	/*
	 * If there is a waiter in front of us that has a context, then its
	 * stamp is earlier than ours and we must back off.
	 */
	cur = waiter;
	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
		if (cur->ww_ctx)
			goto deadlock;
	}

	return 0;

deadlock:
#ifdef CONFIG_DEBUG_MUTEXES
	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
	ctx->contending_lock = ww;
#endif
	return -EDEADLK;
}

static inline int __sched
__ww_mutex_add_waiter(struct mutex_waiter *waiter,
		      struct mutex *lock,
		      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos;

	if (!ww_ctx) {
		list_add_tail(&waiter->list, &lock->wait_list);
		return 0;
	}

	/*
	 * Add the waiter before the first waiter with a higher stamp.
	 * Waiters without a context are skipped to avoid starving them.
	 */
	pos = &lock->wait_list;
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx)) {
			/* Back off immediately if necessary. */
			if (ww_ctx->acquired > 0) {
#ifdef CONFIG_DEBUG_MUTEXES
				struct ww_mutex *ww;

				ww = container_of(lock, struct ww_mutex, base);
				DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock);
				ww_ctx->contending_lock = ww;
#endif
				return -EDEADLK;
			}

			break;
		}

		pos = &cur->list;

		/*
		 * Wake up the waiter so that it gets a chance to back off.
		 */
		if (cur->ww_ctx->acquired > 0) {
			debug_mutex_wake_waiter(lock, cur);
			wake_up_process(cur->task);
		}
	}

	list_add_tail(&waiter->list, pos);
	return 0;
}

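/*
 * Lock a mutex (possibly interruptible), slowpath:
 */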
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	bool first = false;
	struct ww_mutex *ww;
	int ret;

	might_sleep();

	ww = container_of(lock, struct ww_mutex, base);
	if (use_ww_ctx && ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (use_ww_ctx && ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (use_ww_ctx && ww_ctx)
			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, current);

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		list_add_tail(&waiter.list, &lock->wait_list);

#ifdef CONFIG_DEBUG_MUTEXES
		waiter.ww_ctx = MUTEX_POISON_WW_CTX;
#endif
	} else {
		/* Add in stamp order, waking up waiters that must back off. */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_backoff;

		waiter.ww_ctx = ww_ctx;
	}

	waiter.task = current;

	if (__mutex_waiter_is_first(lock, &waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);

	set_current_state(state);
	for (;;) {
		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, so do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (unlikely(signal_pending_state(state, current))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		/*
		 * ww_mutex needs to always recheck its position since its
		 * waiter list is not FIFO ordered.
		 */
		if ((use_ww_ctx && ww_ctx) || !first) {
			first = __mutex_waiter_is_first(lock, &waiter);
			if (first)
				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
		}

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its unlock and acquire.
		 */
		if (__mutex_trylock(lock) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
			break;

		spin_lock(&lock->wait_lock);
	}
	spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, current);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (use_ww_ctx && ww_ctx)
		ww_mutex_set_context_slowpath(ww, ww_ctx);

	spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
	spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, long state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, long state, unsigned int subclass,
		struct lockdep_map *nest_lock, unsigned long ip,
		struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

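/*
 * ww_mutex_deadlock_injection - arbitrarily report -EDEADLK on a held
 * ww_mutex to exercise the backoff paths (only with
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH). The injection interval grows
 * geometrically so the runtime overhead stays bounded.
 */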
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
			      ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

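/*
 * Release the lock, slowpath:
 */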
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, 1, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		unsigned long old;

#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(__owner_task(owner) != current);
		DEBUG_LOCKS_WARN_ON(owner & MUTEX_FLAG_PICKUP);
#endif

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		old = atomic_long_cmpxchg_release(&lock->owner, owner,
						  __owner_flags(owner));
		if (old == owner) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}

		owner = old;
	}

	spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

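/**
 * mutex_lock_interruptible - acquire the mutex, interruptible by signals
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */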
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);

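/**
 * mutex_lock_killable - acquire the mutex, interruptible by fatal signals
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to the
 * current process is delivered while the process is sleeping, this function
 * will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */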
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

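/**
 * mutex_lock_io - acquire the mutex and mark the task as waiting for I/O
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */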
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, NULL,
			       _RET_IP_, ctx);
}

#endif

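/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */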
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked = __mutex_trylock(lock);

	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif

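/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */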
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);