// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/locking/mutex-design.rst.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

#ifndef CONFIG_PREEMPT_RT
#include "mutex.h"

#ifdef CONFIG_DEBUG_MUTEXES
# define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond)
#else
# define MUTEX_WARN_ON(cond)
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_long_set(&lock->owner, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
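
/*
 * Illustrative sketch (editor's addition, not part of this file): the two
 * common ways client code initializes a mutex. 'my_driver_lock' and
 * 'my_dev' are hypothetical names.
 *
 *	#include <linux/mutex.h>
 *
 *	static DEFINE_MUTEX(my_driver_lock);	// statically initialized
 *
 *	struct my_dev {
 *		struct mutex lock;
 *	};
 *
 *	static int my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->lock);		// runtime init; expands to
 *						// __mutex_init() above with a
 *						// unique lockdep class key
 *		return 0;
 *	}
 *
 * memset()-ing a mutex to zero, or copying a held mutex, is not a valid
 * way to (re)initialize it.
 */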

/*
 * @owner: contains: 'struct task_struct *' to the current lock owner,
 * NULL means not owned. Since task_struct pointers are aligned at
 * at least L1_CACHE_BYTES, we have low bits to store extra state.
 *
 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
 * Bit1 indicates unlock needs to hand the lock to the top-waiter.
 * Bit2 indicates handoff has been done and we're waiting for pickup.
 */
#define MUTEX_FLAG_WAITERS	0x01
#define MUTEX_FLAG_HANDOFF	0x02
#define MUTEX_FLAG_PICKUP	0x04

#define MUTEX_FLAGS		0x07
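
/*
 * Worked example (editor's addition): on a 64-bit machine, suppose the
 * owning task_struct sits at 0xffff888100342c00. While that task owns the
 * mutex, one waiter is queued and a handoff has been requested, so
 * lock->owner holds
 *
 *	0xffff888100342c00 | MUTEX_FLAG_WAITERS | MUTEX_FLAG_HANDOFF
 *	  == 0xffff888100342c03
 *
 * Masking with ~MUTEX_FLAGS recovers the task pointer; masking with
 * MUTEX_FLAGS recovers the state bits, as the helpers below do.
 */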
/*
 * Internal helper functions; C doesn't allow us to hide them :/
 *
 * DO NOT USE (outside of mutex code).
 */
static inline struct task_struct *__mutex_owner(struct mutex *lock)
{
	return (struct task_struct *)(atomic_long_read(&lock->owner) & ~MUTEX_FLAGS);
}

static inline struct task_struct *__owner_task(unsigned long owner)
{
	return (struct task_struct *)(owner & ~MUTEX_FLAGS);
}

/**
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns true if the mutex is locked, false if unlocked.
 */
bool mutex_is_locked(struct mutex *lock)
{
	return __mutex_owner(lock) != NULL;
}
EXPORT_SYMBOL(mutex_is_locked);

static inline unsigned long __owner_flags(unsigned long owner)
{
	return owner & MUTEX_FLAGS;
}

/*
 * Returns: __mutex_owner(lock) on failure or NULL on success.
 */
static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff)
{
	unsigned long owner, curr = (unsigned long)current;

	owner = atomic_long_read(&lock->owner);
	for (;;) { /* must loop, can race against a flag */
		unsigned long flags = __owner_flags(owner);
		unsigned long task = owner & ~MUTEX_FLAGS;

		if (task) {
			if (flags & MUTEX_FLAG_PICKUP) {
				if (task != curr)
					break;
				flags &= ~MUTEX_FLAG_PICKUP;
			} else if (handoff) {
				if (flags & MUTEX_FLAG_HANDOFF)
					break;
				flags |= MUTEX_FLAG_HANDOFF;
			} else {
				break; /* locked by someone else */
			}
		} else {
			MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP));
			task = curr;
		}

		if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) {
			if (task == curr)
				return NULL;
			break;
		}
	}

	return __owner_task(owner);
}

/*
 * Trylock or set HANDOFF
 */
static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
{
	return !__mutex_trylock_common(lock, handoff);
}

/*
 * Actual trylock that will work on any unlocked state.
 */
static inline bool __mutex_trylock(struct mutex *lock)
{
	return !__mutex_trylock_common(lock, false);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Lockdep annotations are contained to the slow paths for simplicity.
 * There is nothing that would stop spreading the lockdep annotations
 * to the fast paths, it's just not worth the effort and keeps our
 * code cleaner.
 *
 * Optimistic trylock that only works in the uncontended case. Make
 * sure to follow with a __mutex_trylock() before failing.
 */
static __always_inline bool __mutex_trylock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;
	unsigned long zero = 0UL;

	if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr))
		return true;

	return false;
}

static __always_inline bool __mutex_unlock_fast(struct mutex *lock)
{
	unsigned long curr = (unsigned long)current;

	return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL);
}
#endif

static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_or(flag, &lock->owner);
}

static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag)
{
	atomic_long_andnot(flag, &lock->owner);
}

static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
{
	return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter;
}

/*
 * Add @waiter to a given location in the lock wait_list and set the
 * FLAG_WAITERS flag if it's the first waiter.
 */
static void
__mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
		   struct list_head *list)
{
	debug_mutex_add_waiter(lock, waiter, current);

	list_add_tail(&waiter->list, list);
	if (__mutex_waiter_is_first(lock, waiter))
		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
}

static void
__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
{
	list_del(&waiter->list);
	if (likely(list_empty(&lock->wait_list)))
		__mutex_clear_flag(lock, MUTEX_FLAGS);

	debug_mutex_remove_waiter(lock, waiter, current);
}

/*
 * Give up ownership to a specific task, when @task = NULL, this is equivalent
 * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
 * WAITERS. Provides RELEASE semantics like a regular unlock, the
 * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
 */
static void __mutex_handoff(struct mutex *lock, struct task_struct *task)
{
	unsigned long owner = atomic_long_read(&lock->owner);

	for (;;) {
		unsigned long new;

		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		new = (owner & MUTEX_FLAG_WAITERS);
		new |= (unsigned long)task;
		if (task)
			new |= MUTEX_FLAG_PICKUP;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new))
			break;
	}
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static void __sched __mutex_lock_slowpath(struct mutex *lock);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();

	if (!__mutex_trylock_fast(lock))
		__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
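
/*
 * Illustrative sketch (editor's addition): the canonical critical section.
 * 'my_dev' and 'my_dev_set_state' are hypothetical.
 *
 *	static int my_dev_set_state(struct my_dev *dev, int state)
 *	{
 *		mutex_lock(&dev->lock);		// may sleep; process context only
 *		dev->state = state;		// exclusive access here
 *		mutex_unlock(&dev->lock);	// must be the same task
 *		return 0;
 *	}
 *
 * Unlike spinlocks, a mutex may not be acquired in interrupt context, and
 * the owner may sleep while holding it.
 */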

#include "ww_mutex.h"

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER

/*
 * Trylock variant that returns the owning task on failure.
 */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{
	return __mutex_trylock_common(lock, false);
}

static inline
bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
			    struct mutex_waiter *waiter)
{
	struct ww_mutex *ww;

	ww = container_of(lock, struct ww_mutex, base);

	/*
	 * If ww->ctx is set the contents are undefined, only
	 * by acquiring wait_lock there is a guarantee that
	 * they are not invalid when reading.
	 *
	 * As such, when deadlock detection needs to be
	 * performed the optimistic spinning cannot be done.
	 *
	 * Check this in every inner iteration because we may
	 * be racing against another thread's ww_mutex_lock.
	 */
	if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx))
		return false;

	/*
	 * If we aren't on the wait list yet, cancel the spin
	 * when there are waiters. We want to avoid stealing the
	 * lock from a waiter with an earlier stamp, since the
	 * other thread may already own a lock that we also
	 * need.
	 */
	if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS))
		return false;

	/*
	 * Similarly, stop spinning if we are no longer the
	 * first waiter.
	 */
	if (waiter && !__mutex_waiter_is_first(lock, waiter))
		return false;

	return true;
}

/*
 * Look out! "owner" is an entirely speculative pointer access and not
 * reliable.
 *
 * "noinline" so that this function shows up on perf profiles.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
			 struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
{
	bool ret = true;

	rcu_read_lock();
	while (__mutex_owner(lock) == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Use vcpu_is_preempted to detect lock holder preemption issue.
		 */
		if (!owner->on_cpu || need_resched() ||
		    vcpu_is_preempted(task_cpu(owner))) {
			ret = false;
			break;
		}

		if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) {
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = __mutex_owner(lock);

	/*
	 * On the lock holder preemption issue: skip spinning if the owner
	 * task is not running on a CPU, or its CPU is preempted.
	 */
	if (owner)
		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
	rcu_read_unlock();

	/*
	 * If lock->owner is not set, the mutex has been released. Return true
	 * such that we'll trylock in the spin path, which is a faster option
	 * than the blocking slow path.
	 */
	return retval;
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 *
 * The waiter flag is set to true if the spinner is a waiter in the wait
 * queue. The waiter-spinner will spin on the lock directly and concurrently
 * with the spinner at the head of the OSQ, if present, until the owner is
 * changed to itself.
 */
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	if (!waiter) {
		/*
		 * The purpose of the mutex_can_spin_on_owner() function is
		 * to eliminate the overhead of osq_lock() and osq_unlock()
		 * in case spinning isn't possible. As a waiter-spinner
		 * is not going to take OSQ lock anyway, there is no need
		 * to call mutex_can_spin_on_owner().
		 */
		if (!mutex_can_spin_on_owner(lock))
			goto fail;

		/*
		 * In order to avoid a stampede of mutex spinners trying to
		 * acquire the mutex all at once, the spinners need to take a
		 * MCS (queued) lock first before spinning on the owner field.
		 */
		if (!osq_lock(&lock->osq))
			goto fail;
	}

	for (;;) {
		struct task_struct *owner;

		/* Try to acquire the mutex... */
		owner = __mutex_trylock_or_owner(lock);
		if (!owner)
			break;

		/*
		 * There's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter))
			goto fail_unlock;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	if (!waiter)
		osq_unlock(&lock->osq);

	return true;

fail_unlock:
	if (!waiter)
		osq_unlock(&lock->osq);

fail:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * A waiter-spinner can reach this point in ->state != RUNNING;
		 * restore TASK_RUNNING so the schedule() below is merely a
		 * preemption point and not an actual sleep.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static __always_inline bool
mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
		      struct mutex_waiter *waiter)
{
	return false;
}
#endif

static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
	if (__mutex_unlock_fast(lock))
		return;
#endif
	__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	__ww_mutex_unlock(lock);
	mutex_unlock(&lock->base);
}
EXPORT_SYMBOL(ww_mutex_unlock);
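
/*
 * Illustrative sketch (editor's addition): the usual wound/wait locking
 * loop over two ww_mutexes of the same class. 'my_class', 'a', 'b' and
 * 'lock_pair' are hypothetical.
 *
 *	static DEFINE_WW_CLASS(my_class);
 *
 *	static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
 *	{
 *		struct ww_acquire_ctx ctx;
 *
 *		ww_acquire_init(&ctx, &my_class);
 *
 *		ww_mutex_lock(a, &ctx);		// first lock in a fresh ctx
 *						// never returns -EDEADLK
 *		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
 *			// lost the ordering race: back off, sleep on b, retry
 *			ww_mutex_unlock(a);
 *			ww_mutex_lock_slow(b, &ctx);
 *			swap(a, b);
 *		}
 *		ww_acquire_done(&ctx);
 *
 *		// ... both objects locked here ...
 *
 *		ww_mutex_unlock(a);
 *		ww_mutex_unlock(b);
 *		ww_acquire_fini(&ctx);
 *	}
 */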

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct mutex_waiter waiter;
	struct ww_mutex *ww;
	int ret;

	if (!use_ww_ctx)
		ww_ctx = NULL;

	might_sleep();

	MUTEX_WARN_ON(lock->magic != lock);

	ww = container_of(lock, struct ww_mutex, base);
	if (ww_ctx) {
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;

		/*
		 * Reset the wounded flag after a kill. No other process can
		 * race and wound us here since they can't have a valid owner
		 * pointer if we don't have any locks held.
		 */
		if (ww_ctx->acquired == 0)
			ww_ctx->wounded = 0;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
		nest_lock = &ww_ctx->dep_map;
#endif
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (__mutex_trylock(lock) ||
	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
		/* got the lock, yay! */
		lock_acquired(&lock->dep_map, ip);
		if (ww_ctx)
			ww_mutex_set_context_fastpath(ww, ww_ctx);
		preempt_enable();
		return 0;
	}

	raw_spin_lock(&lock->wait_lock);
	/*
	 * After waiting to acquire the wait_lock, try again.
	 */
	if (__mutex_trylock(lock)) {
		if (ww_ctx)
			__ww_mutex_check_waiters(lock, ww_ctx);

		goto skip_wait;
	}

	debug_mutex_lock_common(lock, &waiter);
	waiter.task = current;
	if (use_ww_ctx)
		waiter.ww_ctx = ww_ctx;

	lock_contended(&lock->dep_map, ip);

	if (!use_ww_ctx) {
		/* add waiting tasks to the end of the waitqueue (FIFO): */
		__mutex_add_waiter(lock, &waiter, &lock->wait_list);
	} else {
		/*
		 * Add in stamp order, waking up waiters that must kill
		 * themselves.
		 */
		ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx);
		if (ret)
			goto err_early_kill;
	}

	set_current_state(state);
	for (;;) {
		bool first;

		/*
		 * Once we hold wait_lock, we're serialized against
		 * mutex_unlock() handing the lock off to us, do a trylock
		 * before testing the error conditions to make sure we pick up
		 * the handoff.
		 */
		if (__mutex_trylock(lock))
			goto acquired;

		/*
		 * Check for signals and kill conditions while holding
		 * wait_lock. This ensures the lock cancellation is ordered
		 * against mutex_unlock() and wake-ups do not go missing.
		 */
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			goto err;
		}

		if (ww_ctx) {
			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
			if (ret)
				goto err;
		}

		raw_spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();

		first = __mutex_waiter_is_first(lock, &waiter);

		set_current_state(state);
		/*
		 * Here we order against unlock; we must either see it change
		 * state back to RUNNING and fall through the next schedule(),
		 * or we must see its signal.
		 */
		if (__mutex_trylock_or_handoff(lock, first) ||
		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
			break;

		raw_spin_lock(&lock->wait_lock);
	}
	raw_spin_lock(&lock->wait_lock);
acquired:
	__set_current_state(TASK_RUNNING);

	if (ww_ctx) {
		/*
		 * Wound-Wait; we stole the lock (!first_waiter), check the
		 * waiters as anyone might want to wound us.
		 */
		if (!ww_ctx->is_wait_die &&
		    !__mutex_waiter_is_first(lock, &waiter))
			__ww_mutex_check_waiters(lock, ww_ctx);
	}

	__mutex_remove_waiter(lock, &waiter);

	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);

	if (ww_ctx)
		ww_mutex_lock_acquired(ww, ww_ctx);

	raw_spin_unlock(&lock->wait_lock);
	preempt_enable();
	return 0;

err:
	__set_current_state(TASK_RUNNING);
	__mutex_remove_waiter(lock, &waiter);
err_early_kill:
	raw_spin_unlock(&lock->wait_lock);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, ip);
	preempt_enable();
	return ret;
}

static int __sched
__mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
	     struct lockdep_map *nest_lock, unsigned long ip)
{
	return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false);
}

static int __sched
__ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass,
		unsigned long ip, struct ww_acquire_ctx *ww_ctx)
{
	return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

void __sched
mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
{
	int token;

	might_sleep();

	token = io_schedule_prepare();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE,
			      0, _RET_IP_, ctx);
	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE,
			      0, _RET_IP_, ctx);

	if (!ret && ctx && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
	struct task_struct *next = NULL;
	DEFINE_WAKE_Q(wake_q);
	unsigned long owner;

	mutex_release(&lock->dep_map, ip);

	/*
	 * Release the lock before (potentially) taking the spinlock such that
	 * other contenders can get on with things ASAP.
	 *
	 * Except when HANDOFF, in that case we must not clear the owner field,
	 * but instead set it to the top waiter.
	 */
	owner = atomic_long_read(&lock->owner);
	for (;;) {
		MUTEX_WARN_ON(__owner_task(owner) != current);
		MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP);

		if (owner & MUTEX_FLAG_HANDOFF)
			break;

		if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) {
			if (owner & MUTEX_FLAG_WAITERS)
				break;

			return;
		}
	}

	raw_spin_lock(&lock->wait_lock);
	debug_mutex_unlock(lock);
	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
			list_first_entry(&lock->wait_list,
					 struct mutex_waiter, list);

		next = waiter->task;

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, next);
	}

	if (owner & MUTEX_FLAG_HANDOFF)
		__mutex_handoff(lock, next);

	raw_spin_unlock(&lock->wait_lock);

	wake_up_q(&wake_q);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_*_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal is delivered while the
 * process is sleeping, this function will return without acquiring the
 * mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * signal arrived.
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
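
/*
 * Illustrative sketch (editor's addition): a syscall-facing path that must
 * stay responsive to signals. 'my_dev_wait' is hypothetical.
 *
 *	static long my_dev_wait(struct my_dev *dev)
 *	{
 *		if (mutex_lock_interruptible(&dev->lock))
 *			return -ERESTARTSYS;	// signal arrived while sleeping
 *
 *		// ... critical section ...
 *
 *		mutex_unlock(&dev->lock);
 *		return 0;
 *	}
 *
 * mutex_lock_killable() below follows the same pattern, but only fatal
 * signals interrupt the sleep.
 */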

/**
 * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals.
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). If a signal which will be fatal to
 * the current process is delivered while the process is sleeping, this
 * function will return without acquiring the mutex.
 *
 * Context: Process context.
 * Return: 0 if the lock was successfully acquired or %-EINTR if a
 * fatal signal arrived.
 */
int __sched mutex_lock_killable(struct mutex *lock)
{
	might_sleep();

	if (__mutex_trylock_fast(lock))
		return 0;

	return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

/**
 * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O
 * @lock: The mutex to be acquired.
 *
 * Lock the mutex like mutex_lock(). While the task is waiting for this
 * mutex, it will be accounted as being in the IO wait state by the
 * scheduler.
 *
 * Context: Process context.
 */
void __sched mutex_lock_io(struct mutex *lock)
{
	int token;

	token = io_schedule_prepare();
	mutex_lock(lock);
	io_schedule_finish(token);
}
EXPORT_SYMBOL_GPL(mutex_lock_io);
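
/*
 * Illustrative sketch (editor's addition): use mutex_lock_io() when the
 * lock is effectively a proxy for I/O completion, so time spent blocked
 * is charged as iowait rather than idle. Names are hypothetical.
 *
 *	static void my_fs_flush(struct my_fs *fs)
 *	{
 *		mutex_lock_io(&fs->commit_lock);	// waiting counts as iowait
 *		// ... issue and finish the commit ...
 *		mutex_unlock(&fs->commit_lock);
 *	}
 */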

static noinline void __sched
__mutex_lock_slowpath(struct mutex *lock)
{
	__mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0,
			       _RET_IP_, ctx);
}

#endif

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	bool locked;

	MUTEX_WARN_ON(lock->magic != lock);

	locked = __mutex_trylock(lock);
	if (locked)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return locked;
}
EXPORT_SYMBOL(mutex_trylock);
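
/*
 * Illustrative sketch (editor's addition): opportunistic work that is
 * skipped rather than waited for. Note the spin_trylock()-style return:
 * nonzero means the lock WAS taken. Names are hypothetical.
 *
 *	static void my_dev_try_gc(struct my_dev *dev)
 *	{
 *		if (!mutex_trylock(&dev->lock))
 *			return;			// busy; try again later
 *
 *		// ... do optional cleanup ...
 *
 *		mutex_unlock(&dev->lock);
 *	}
 */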

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock);

int __sched
ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	might_sleep();

	if (__mutex_trylock_fast(&lock->base)) {
		if (ctx)
			ww_mutex_set_context_fastpath(lock, ctx);
		return 0;
	}

	return __ww_mutex_lock_interruptible_slowpath(lock, ctx);
}
EXPORT_SYMBOL(ww_mutex_lock_interruptible);

#endif /* !CONFIG_DEBUG_LOCK_ALLOC */
#endif /* !CONFIG_PREEMPT_RT */

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
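
/*
 * Illustrative sketch (editor's addition): refcounted teardown where the
 * final reference must drop under a lock. Names are hypothetical.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refs, &obj_list_lock))
 *			return;			// not the last reference
 *
 *		list_del(&obj->node);		// refs hit 0 with lock held
 *		mutex_unlock(&obj_list_lock);
 *		kfree(obj);
 *	}
 */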