/*
 * kernel/locking/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks.
 *
 * This implementation has three paths: an architecture-assisted atomic
 * fastpath for the uncontended case, an optional mid path that
 * optimistically spins while the lock owner is running on another CPU,
 * and a slowpath that puts the caller on a wait list and sleeps.
 *
 * Also see Documentation/locking/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
#include <linux/osq_lock.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
/*
 * Must be 0 for the debug case so we do not do the unlock outside of the
 * wait_lock region. debug_mutex_unlock() will do the actual unlock in this
 * case.
 */
# undef __mutex_slowpath_needs_to_unlock
# define __mutex_slowpath_needs_to_unlock()	0
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	osq_lock_init(&lock->osq);
#endif

	debug_mutex_init(lock, name, key);
}

EXPORT_SYMBOL(__mutex_init);
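
/*
 * Illustrative sketch (not part of this file): how a driver would typically
 * define and initialize a mutex before using the APIs below. The identifiers
 * my_driver_lock, struct my_dev and my_dev_setup() are made up for the
 * example; DEFINE_MUTEX() and mutex_init() are the public initializers from
 * <linux/mutex.h>.
 *
 *	static DEFINE_MUTEX(my_driver_lock);		// static initialization
 *
 *	struct my_dev {
 *		struct mutex	io_lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		mutex_init(&dev->io_lock);		// runtime initialization
 *	}
 */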

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure that
 * the branch is predicted by the CPU as default-untaken.
 */
__visible void __sched __mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging.)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}

EXPORT_SYMBOL(mutex_lock);
#endif
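
/*
 * Illustrative usage sketch (not part of this file): protecting a short
 * critical section with mutex_lock()/mutex_unlock(). The identifiers
 * my_list_lock, my_list and my_add() are made up for the example.
 *
 *	static DEFINE_MUTEX(my_list_lock);
 *	static LIST_HEAD(my_list);
 *
 *	static void my_add(struct list_head *node)
 *	{
 *		mutex_lock(&my_list_lock);	// may sleep, process context only
 *		list_add_tail(node, &my_list);
 *		mutex_unlock(&my_list_lock);	// must be released by the same task
 *	}
 */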

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
						   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
	/*
	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
	 * but released with a normal mutex_unlock in this call.
	 *
	 * This should never happen, always use ww_mutex_unlock.
	 */
	DEBUG_LOCKS_WARN_ON(ww->ctx);

	/*
	 * Not quite done after calling ww_acquire_done() ?
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

	if (ww_ctx->contending_lock) {
		/*
		 * After -EDEADLK you tried to wait on this lock again,
		 * before releasing all the other locks you hold.
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

		/*
		 * You called ww_mutex_lock after receiving -EDEADLK,
		 * but 'forgot' to unlock everything else first?
		 */
		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
		ww_ctx->contending_lock = NULL;
	}

	/*
	 * Naughty, using a different class will lead to undefined behavior!
	 */
	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
	ww_ctx->acquired++;
}

/*
 * After acquiring the lock in the fastpath, or when we lost out in the
 * contested slowpath, set the ctx and wake up any waiters so they can
 * recheck whether they have to back off.
 *
 * This function is never called when wait_lock is held.
 */
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	unsigned long flags;
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);

	lock->ctx = ctx;

	/*
	 * The lock->ctx update should be visible on all cores before
	 * the atomic read is done, otherwise contended waiters might be
	 * missed. The contended waiters will either see ww_ctx == NULL
	 * and keep spinning, or they will acquire wait_lock, add themselves
	 * to the waiter list and sleep.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Check if lock is contended, if not there is nobody to wake up.
	 */
	if (likely(atomic_read(&lock->base.count) == 0))
		return;

	/*
	 * Uh oh, we raced in fastpath, wake up everyone in this case,
	 * so they can see the new lock->ctx.
	 */
	spin_lock_mutex(&lock->base.wait_lock, flags);
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
	spin_unlock_mutex(&lock->base.wait_lock, flags);
}

/*
 * After acquiring the lock in the slowpath, set the ctx and wake up any
 * waiters so they can recheck.
 *
 * Callers must hold the mutex wait_lock.
 */
static __always_inline void
ww_mutex_set_context_slowpath(struct ww_mutex *lock,
			      struct ww_acquire_ctx *ctx)
{
	struct mutex_waiter *cur;

	ww_mutex_lock_acquired(lock, ctx);
	lock->ctx = ctx;

	/*
	 * Give any possible sleeping processes the chance to wake up,
	 * so they can recheck if they have to back off.
	 */
	list_for_each_entry(cur, &lock->base.wait_list, list) {
		debug_mutex_wake_waiter(&lock->base, cur);
		wake_up_process(cur->task);
	}
}

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */
static noinline
bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
	bool ret = true;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner->on_cpu || need_resched()) {
			ret = false;
			break;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	return ret;
}

/*
 * Initial check for entering the mutex spinning loop
 */
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
	struct task_struct *owner;
	int retval = 1;

	if (need_resched())
		return 0;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	if (owner)
		retval = owner->on_cpu;
	rcu_read_unlock();
	/*
	 * If lock->owner is not set, the mutex owner may have just acquired
	 * it and not set the owner yet, or the mutex has been released.
	 */
	return retval;
}

/*
 * Atomically try to take the lock when it is available
 */
static inline bool mutex_try_to_acquire(struct mutex *lock)
{
	return !mutex_is_locked(lock) &&
		(atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
}

/*
 * Optimistic spinning.
 *
 * We try to spin for acquisition when we find that the lock owner
 * is currently running on a (different) CPU and while we don't
 * need to reschedule. The rationale is that if the lock owner is
 * running, it is likely to release the lock soon.
 *
 * Since this needs the lock owner, and this mutex implementation
 * doesn't track the owner atomically in the lock field, we need to
 * track it non-atomically.
 *
 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
 * to serialize everything.
 *
 * The mutex spinners are queued up using MCS lock so that only one
 * spinner can compete for the mutex. However, if mutex spinning isn't
 * going to happen, there is no point in going through the lock/unlock
 * overhead.
 *
 * Returns true when the lock was taken, otherwise false, indicating
 * that we need to jump to the slowpath and sleep.
 */
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;

	if (!mutex_can_spin_on_owner(lock))
		goto done;

	/*
	 * In order to avoid a stampede of mutex spinners trying to
	 * acquire the mutex all at once, the spinners need to take a
	 * MCS (queued) lock first before spinning on the owner field.
	 */
	if (!osq_lock(&lock->osq))
		goto done;

	while (true) {
		struct task_struct *owner;

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			struct ww_mutex *ww;

			ww = container_of(lock, struct ww_mutex, base);
			/*
			 * If ww->ctx is set the contents are undefined, only
			 * by acquiring wait_lock there is a guarantee that
			 * they are not invalid when reading.
			 *
			 * As such, when deadlock detection needs to be
			 * performed the optimistic spinning cannot be done.
			 */
			if (READ_ONCE(ww->ctx))
				break;
		}

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		/* Try to acquire the mutex if it is unlocked. */
		if (mutex_try_to_acquire(lock)) {
			lock_acquired(&lock->dep_map, ip);

			if (use_ww_ctx) {
				struct ww_mutex *ww;
				ww = container_of(lock, struct ww_mutex, base);

				ww_mutex_set_context_fastpath(ww, ww_ctx);
			}

			mutex_set_owner(lock);
			osq_unlock(&lock->osq);
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
done:
	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock the mutex. This avoids getting
	 * scheduled out right after we obtained the mutex.
	 */
	if (need_resched()) {
		/*
		 * We _should_ have TASK_RUNNING here, but just in case
		 * we do not, make it so, otherwise we might get stuck.
		 */
		__set_current_state(TASK_RUNNING);
		schedule_preempt_disabled();
	}

	return false;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
				  struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	return false;
}
#endif

__visible __used noinline
void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before
	 * unlocking: debug_mutex_unlock() verifies and clears the owner
	 * itself, under wait_lock, so only do it here in the !debug case.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}

EXPORT_SYMBOL(mutex_unlock);

/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of the
 * ww_mutex_lock* functions (with or without an acquire context). It is
 * forbidden to release the locks after releasing the acquire context.
 *
 * This function must not be used in interrupt context. Unlocking
 * of an unlocked mutex is not allowed.
 */
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
	if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
		if (lock->ctx->acquired > 0)
			lock->ctx->acquired--;
		lock->ctx = NULL;
	}

#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before
	 * unlocking: debug_mutex_unlock() verifies and clears the owner
	 * itself, under wait_lock, so only do it here in the !debug case.
	 */
	mutex_clear_owner(&lock->base);
#endif
	__mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);
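
/*
 * Illustrative wait/wound usage sketch (not part of this file): taking two
 * w/w mutexes under one acquire context and backing off on -EDEADLK. The
 * helper lock_two() and the class my_ww_class are made up; ww_acquire_init(),
 * ww_mutex_lock(), ww_mutex_lock_slow(), ww_mutex_unlock() and
 * ww_acquire_fini() are the public wrappers from <linux/ww_mutex.h>.
 *
 *	static DEFINE_WW_CLASS(my_ww_class);
 *
 *	static int lock_two(struct ww_mutex *m1, struct ww_mutex *m2,
 *			    struct ww_acquire_ctx *ctx)
 *	{
 *		int ret;
 *
 *		ret = ww_mutex_lock(m1, ctx);
 *		if (ret)
 *			return ret;
 *
 *		while ((ret = ww_mutex_lock(m2, ctx)) == -EDEADLK) {
 *			ww_mutex_unlock(m1);		// back off ...
 *			ww_mutex_lock_slow(m2, ctx);	// ... wait for the winner
 *			swap(m1, m2);			// m1 is now the held lock
 *		}
 *		if (ret)
 *			ww_mutex_unlock(m1);
 *		return ret;
 *	}
 *
 * The caller brackets this with ww_acquire_init(&ctx, &my_ww_class) and,
 * once both locks have been dropped with ww_mutex_unlock(), ww_acquire_fini().
 */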

/*
 * Check whether we have to back off: -EDEADLK is returned when the lock
 * is already held by a context with an older (higher priority) stamp.
 */
static inline int __sched
__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);

	if (!hold_ctx)
		return 0;

	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
		ctx->contending_lock = ww;
#endif
		return -EDEADLK;
	}

	return 0;
}

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;
	int ret;

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
			return -EALREADY;
	}

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

	if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
		/* got the lock, yay! */
		preempt_enable();
		return 0;
	}

	spin_lock_mutex(&lock->wait_lock, flags);

	/*
	 * Once more, try to acquire the lock. Only try-lock the mutex if
	 * it is unlocked to reduce unnecessary xchg() operations.
	 */
	if (!mutex_is_locked(lock) &&
	    (atomic_xchg_acquire(&lock->count, 0) == 1))
		goto skip_wait;

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task);

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Lets try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters. We only attempt the xchg if the count is
		 * non-negative in order to avoid unnecessary xchg operations:
		 */
		if (atomic_read(&lock->count) >= 0 &&
		    (atomic_xchg_acquire(&lock->count, -1) == 1))
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			ret = -EINTR;
			goto err;
		}

		if (use_ww_ctx && ww_ctx->acquired > 0) {
			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
			if (ret)
				goto err;
		}

		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		schedule_preempt_disabled();
		spin_lock_mutex(&lock->wait_lock, flags);
	}
	__set_task_state(task, TASK_RUNNING);

	mutex_remove_waiter(lock, &waiter, task);
	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);
	debug_mutex_free_waiter(&waiter);

skip_wait:
	/* got the lock - cleanup and rejoice! */
	lock_acquired(&lock->dep_map, ip);
	mutex_set_owner(lock);

	if (use_ww_ctx) {
		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
		ww_mutex_set_context_slowpath(ww, ww_ctx);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	preempt_enable();
	return 0;

err:
	mutex_remove_waiter(lock, &waiter, task);
	spin_unlock_mutex(&lock->wait_lock, flags);
	debug_mutex_free_waiter(&waiter);
	mutex_release(&lock->dep_map, 1, ip);
	preempt_enable();
	return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_nested);
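
/*
 * Illustrative sketch (not part of this file): mutex_lock_nested() only
 * differs from mutex_lock() in its lockdep annotation. It is used when two
 * locks of the same lock class are legitimately held at once, e.g. a
 * hypothetical parent/child pair (struct my_node is made up,
 * SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>):
 *
 *	static void lock_parent_and_child(struct my_node *parent,
 *					  struct my_node *child)
 *	{
 *		mutex_lock(&parent->lock);
 *		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 *		...
 *		mutex_unlock(&child->lock);
 *		mutex_unlock(&parent->lock);
 *	}
 */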

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
			    0, nest, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_, NULL, 0);
}

EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

/*
 * With CONFIG_DEBUG_WW_MUTEX_SLOWPATH, occasionally fail an acquisition
 * with -EDEADLK (at a geometrically growing interval) so that the
 * -EDEADLK backoff paths of ww_mutex users get exercised.
 */
static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
	unsigned tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX/4)
			tmp = UINT_MAX;
		else
			tmp = tmp*2 + tmp + tmp/2;	/* interval *= 3.5 */

		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		ctx->contending_lock = lock;

		ww_mutex_unlock(lock);

		return -EDEADLK;
	}
#endif

	return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();
	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
				  0, &ctx->dep_map, _RET_IP_, ctx, 1);

	if (!ret && ctx->acquired > 1)
		return ww_mutex_deadlock_injection(lock, ctx);

	return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(struct mutex *lock, int nested)
{
	unsigned long flags;
	WAKE_Q(wake_q);

	/*
	 * As a performance measurement, release the lock before doing other
	 * wakeup related duties to follow. This allows other tasks to acquire
	 * the lock sooner, while still handling cleanups in past unlock calls.
	 * This can be done as we do not enforce strict equivalence between the
	 * mutex counter and wait_list.
	 *
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here - as the lock counter is currently 0 or negative.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);
		wake_q_add(&wake_q, waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
	wake_up_q(&wake_q);
}

/*
 * Release the lock, slowpath:
 */
__visible void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_unlock_common_slowpath(lock, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired or sleep until the mutex becomes available. If a
 * signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_interruptible_slowpath(lock);
}

EXPORT_SYMBOL(mutex_lock_interruptible);
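
/*
 * Illustrative sketch (not part of this file): callers of
 * mutex_lock_interruptible() must handle -EINTR, typically by bailing out
 * so the pending signal can be delivered. struct my_dev, my_ioctl() and
 * do_work() are made up for the example.
 *
 *	static long my_ioctl(struct my_dev *dev)
 *	{
 *		int ret;
 *
 *		ret = mutex_lock_interruptible(&dev->lock);
 *		if (ret)
 *			return ret;	// -EINTR: a signal interrupted the wait
 *		ret = do_work(dev);
 *		mutex_unlock(&dev->lock);
 *		return ret;
 *	}
 */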

int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval(&lock->count);
	if (likely(!ret)) {
		mutex_set_owner(lock);
		return 0;
	} else
		return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

__visible void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
			    NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_KILLABLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
				       struct ww_acquire_ctx *ctx)
{
	return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
				   NULL, _RET_IP_, ctx, 1);
}

#endif

/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	/* No need to trylock if the mutex is locked. */
	if (mutex_is_locked(lock))
		return 0;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg_acquire(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
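
/*
 * Illustrative sketch (not part of this file): mutex_trylock() returns 1 on
 * success, so it must not be treated like the 0-on-success lock calls above.
 * my_stats_lock, my_flush() and flush_stats() are made up for the example.
 *
 *	static void my_flush(void)
 *	{
 *		if (!mutex_trylock(&my_stats_lock))
 *			return;		// contended - skip this round
 *		flush_stats();
 *		mutex_unlock(&my_stats_lock);
 *	}
 */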

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
	int ret;

	might_sleep();

	ret = __mutex_fastpath_lock_retval(&lock->base.count);

	if (likely(!ret)) {
		ww_mutex_set_context_fastpath(lock, ctx);
		mutex_set_owner(&lock->base);
	} else
		ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
	return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif

/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * return true and hold lock if we dec to 0, return false otherwise
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
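
/*
 * Illustrative sketch (not part of this file): the typical "drop the last
 * reference under a lock" pattern. struct my_obj, my_table_lock, my_obj_put()
 * and my_destroy() are made up for the example.
 *
 *	static void my_obj_put(struct my_obj *obj)
 *	{
 *		if (!atomic_dec_and_mutex_lock(&obj->refcnt, &my_table_lock))
 *			return;		// not the last reference
 *		list_del(&obj->node);	// unpublish while holding the lock
 *		mutex_unlock(&my_table_lock);
 *		my_destroy(obj);
 *	}
 */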