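/*
 * Mutexes: blocking mutual exclusion locks.
 *
 * Generic mutex implementation: an atomic-count fastpath, an optional
 * optimistic spin-on-owner path (CONFIG_MUTEX_SPIN_ON_OWNER), and
 * wait/wound (ww) mutex support for multi-lock deadlock avoidance.
 */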
#include <linux/mutex.h>
#include <linux/ww_mutex.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>
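
/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */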
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
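
/*
 * A negative mutex count indicates that waiters are sleeping waiting for
 * the mutex: 1 means unlocked, 0 locked with no waiters, < 0 locked with
 * waiters queued.
 */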
#define MUTEX_SHOW_NO_WAITER(mutex)	(atomic_read(&(mutex)->count) >= 0)

void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
        atomic_set(&lock->count, 1);
        spin_lock_init(&lock->wait_lock);
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
        lock->spin_mlock = NULL;
#endif

        debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

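/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed.
 */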
void __sched mutex_lock(struct mutex *lock)
{
        might_sleep();
        /*
         * The locking fastpath is the 1->0 transition from
         * 'unlocked' into 'locked' state.
         */
        __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
        mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
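/*
 * In order to avoid a stampede of mutex spinners trying to acquire the
 * mutex all at once, the spinners queue up on a simple MCS-style lock
 * (a list of mspin_node) and only the head of the queue spins on the
 * mutex owner.
 */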
struct mspin_node {
        struct mspin_node *next;
        int locked;		/* 1 if lock acquired */
};
#define MLOCK(mutex)	((struct mspin_node **)&((mutex)->spin_mlock))

static noinline
void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *prev;

        /* Init node */
        node->locked = 0;
        node->next = NULL;

        prev = xchg(lock, node);
        if (likely(prev == NULL)) {
                /* Lock acquired */
                node->locked = 1;
                return;
        }
        ACCESS_ONCE(prev->next) = node;
        smp_wmb();
        /* Wait until the lock holder passes the lock down */
        while (!ACCESS_ONCE(node->locked))
                arch_mutex_cpu_relax();
}

static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
{
        struct mspin_node *next = ACCESS_ONCE(node->next);

        if (likely(!next)) {
                /*
                 * Release the lock by setting it to NULL
                 */
                if (cmpxchg(lock, node, NULL) == node)
                        return;
                /* Wait until the next pointer is set */
                while (!(next = ACCESS_ONCE(node->next)))
                        arch_mutex_cpu_relax();
        }
        ACCESS_ONCE(next->locked) = 1;
        smp_wmb();
}
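
/*
 * Helper for the optimistic spin: returns true while @owner still owns
 * @lock and is running on a CPU.
 */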
static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
{
        if (lock->owner != owner)
                return false;

        /*
         * Ensure we emit the owner->on_cpu dereference _after_ checking
         * that lock->owner still matches owner. If that fails, owner might
         * point to freed memory; if it still matches, the rcu_read_lock()
         * in the caller ensures the memory stays valid.
         */
        barrier();

        return owner->on_cpu;
}
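
/*
 * Look out! "owner" is an entirely speculative pointer
 * access and not reliable.
 */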
static noinline
int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
{
        rcu_read_lock();
        while (owner_running(lock, owner)) {
                if (need_resched())
                        break;

                arch_mutex_cpu_relax();
        }
        rcu_read_unlock();

        /*
         * We break out of the loop above on need_resched() and when the
         * owner changed, which is a sign for heavy contention. Return
         * success only when lock->owner is NULL.
         */
        return lock->owner == NULL;
}
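
/*
 * Initial check for entering the mutex spinning loop
 */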
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
        struct task_struct *owner;
        int retval = 1;

        rcu_read_lock();
        owner = ACCESS_ONCE(lock->owner);
        if (owner)
                retval = owner->on_cpu;
        rcu_read_unlock();
        /*
         * If lock->owner is not set, the mutex owner may have just acquired
         * it and not set the owner yet, or the mutex has been released.
         */
        return retval;
}
#endif

static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
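
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 */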
void __sched mutex_unlock(struct mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken anyway, which is why we handle
         * it there.
         */
        mutex_clear_owner(lock);
#endif
        __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
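
/**
 * ww_mutex_unlock - release the w/w mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously with any of
 * the ww_mutex_lock* functions (with or without an acquire context).
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 */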
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
        /*
         * The unlocking fastpath is the 0->1 transition from 'locked'
         * into 'unlocked' state:
         */
        if (lock->ctx) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
#endif
                if (lock->ctx->acquired > 0)
                        lock->ctx->acquired--;
                lock->ctx = NULL;
        }

#ifndef CONFIG_DEBUG_MUTEXES
        /*
         * When debugging is enabled we must not clear the owner before time,
         * the slow path will always be taken anyway, which is why we handle
         * it there.
         */
        mutex_clear_owner(&lock->base);
#endif
        __mutex_fastpath_unlock(&lock->base.count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(ww_mutex_unlock);

static inline int __sched
__mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
{
        struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
        struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);

        if (!hold_ctx)
                return 0;

        if (unlikely(ctx == hold_ctx))
                return -EALREADY;

        /*
         * If our context is younger than the one currently holding the
         * lock (wrap-safe stamp comparison, ties broken by address), we
         * must back off with -EDEADLK so the caller can release its locks
         * and wait for the older context to finish.
         */
        if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
            (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
#ifdef CONFIG_DEBUG_MUTEXES
                DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
                ctx->contending_lock = ww;
#endif
                return -EDEADLK;
        }

        return 0;
}

static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
                                                   struct ww_acquire_ctx *ww_ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
        /*
         * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
         * but released with a normal mutex_unlock in this call path.
         * This should never happen, always use ww_mutex_unlock.
         */
        DEBUG_LOCKS_WARN_ON(ww->ctx);

        /*
         * Not quite done after calling ww_acquire_done() ?
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);

        if (ww_ctx->contending_lock) {
                /*
                 * After -EDEADLK you tried to wait on this lock,
                 * but you previously wanted to wait on a different lock.
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);

                /*
                 * You called ww_mutex_lock after receiving -EDEADLK,
                 * but 'forgot' to unlock everything else first?
                 */
                DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
                ww_ctx->contending_lock = NULL;
        }

        /*
         * Using a different ww_class than the mutex's leads to undefined
         * behavior.
         */
        DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
#endif
        ww_ctx->acquired++;
}
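
/*
 * After acquiring the lock in the fastpath (or the optimistic-spin path),
 * set the context and wake up any waiters so they can recheck whether
 * they have to back off.
 */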
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock,
                              struct ww_acquire_ctx *ctx)
{
        unsigned long flags;
        struct mutex_waiter *cur;

        ww_mutex_lock_acquired(lock, ctx);

        lock->ctx = ctx;

        /*
         * The lock->ctx update should be visible on all cores before
         * the atomic read below is done, otherwise contended waiters
         * might be missed. The contended waiters will either see
         * ww_ctx == NULL and keep spinning, or they will acquire
         * wait_lock, add themselves to the wait list and sleep.
         */
        smp_mb();

        /*
         * Check if lock is contended, if not there is nobody to wake up.
         */
        if (likely(atomic_read(&lock->base.count) == 0))
                return;

        /*
         * Uh oh, we raced in fastpath, wake up everyone in this case,
         * so they can see the new lock->ctx.
         */
        spin_lock_mutex(&lock->base.wait_lock, flags);
        list_for_each_entry(cur, &lock->base.wait_list, list) {
                debug_mutex_wake_waiter(&lock->base, cur);
                wake_up_process(cur->task);
        }
        spin_unlock_mutex(&lock->base.wait_lock, flags);
}
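
/*
 * Lock a mutex (possibly interruptible), slowpath:
 */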
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                    struct lockdep_map *nest_lock, unsigned long ip,
                    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
        struct task_struct *task = current;
        struct mutex_waiter waiter;
        unsigned long flags;
        int ret;

        preempt_disable();
        mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
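        /*
         * Optimistic spinning.
         *
         * We try to spin for acquisition when we find that there are no
         * pending waiters and the lock owner is currently running on a
         * (different) CPU. The rationale is that if the lock owner is
         * running, it is likely to release the lock soon.
         *
         * We can't do this for DEBUG_MUTEXES because that relies on
         * wait_lock to serialize everything.
         */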
        if (!mutex_can_spin_on_owner(lock))
                goto slowpath;

        for (;;) {
                struct task_struct *owner;
                struct mspin_node node;

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        struct ww_mutex *ww;

                        ww = container_of(lock, struct ww_mutex, base);
                        /*
                         * If ww->ctx is set the contents are undefined, only
                         * by acquiring wait_lock there is a guarantee that
                         * they are not invalid when reading.
                         *
                         * As such, when deadlock detection needs to be
                         * performed the optimistic spinning cannot be done.
                         */
                        if (ACCESS_ONCE(ww->ctx))
                                goto slowpath;
                }

                /*
                 * If there's an owner, wait for it to either
                 * release the lock or go to sleep.
                 */
                mspin_lock(MLOCK(lock), &node);
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner)) {
                        mspin_unlock(MLOCK(lock), &node);
                        goto slowpath;
                }

                if ((atomic_read(&lock->count) == 1) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        if (use_ww_ctx) {
                                struct ww_mutex *ww;
                                ww = container_of(lock, struct ww_mutex, base);

                                ww_mutex_set_context_fastpath(ww, ww_ctx);
                        }

                        mutex_set_owner(lock);
                        mspin_unlock(MLOCK(lock), &node);
                        preempt_enable();
                        return 0;
                }
                mspin_unlock(MLOCK(lock), &node);

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!owner && (need_resched() || rt_task(task)))
                        goto slowpath;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                arch_mutex_cpu_relax();
        }
slowpath:
#endif
        spin_lock_mutex(&lock->wait_lock, flags);

        /* once more, can we acquire the lock? */
        if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;

        debug_mutex_lock_common(lock, &waiter);
        debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
        waiter.task = task;

        lock_contended(&lock->dep_map, ip);

        for (;;) {
                /*
                 * Lets try to take the lock again - this is needed even if
                 * we get here for the first time (shortly after failing to
                 * acquire the lock), to make sure that we get a wakeup once
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
                 * other waiters.
                 */
                if (MUTEX_SHOW_NO_WAITER(lock) &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;

                /*
                 * got a signal? (This code gets eliminated in the
                 * TASK_UNINTERRUPTIBLE case.)
                 */
                if (unlikely(signal_pending_state(state, task))) {
                        ret = -EINTR;
                        goto err;
                }

                if (use_ww_ctx && ww_ctx->acquired > 0) {
                        ret = __mutex_lock_check_stamp(lock, ww_ctx);
                        if (ret)
                                goto err;
                }

                __set_task_state(task, state);

                /* didn't get the lock, go to sleep: */
                spin_unlock_mutex(&lock->wait_lock, flags);
                schedule_preempt_disabled();
                spin_lock_mutex(&lock->wait_lock, flags);
        }
        mutex_remove_waiter(lock, &waiter, current_thread_info());
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
        debug_mutex_free_waiter(&waiter);

skip_wait:
        /* got the lock - cleanup and rejoice! */
        lock_acquired(&lock->dep_map, ip);
        mutex_set_owner(lock);

        if (use_ww_ctx) {
                struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
                struct mutex_waiter *cur;

                /*
                 * This branch gets optimized out for the common case,
                 * and is only important for ww_mutex_lock.
                 */
                ww_mutex_lock_acquired(ww, ww_ctx);
                ww->ctx = ww_ctx;

                /*
                 * Give any possible sleeping processes the chance to wake up,
                 * so they can recheck if they have to back off.
                 */
                list_for_each_entry(cur, &lock->wait_list, list) {
                        debug_mutex_wake_waiter(lock, cur);
                        wake_up_process(cur->task);
                }
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
        preempt_enable();
        return 0;

err:
        mutex_remove_waiter(lock, &waiter, task_thread_info(task));
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
        preempt_enable();
        return ret;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);

void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
        might_sleep();
        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE,
                            0, nest, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_KILLABLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
        might_sleep();
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
                                   subclass, NULL, _RET_IP_, NULL, 0);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);

static inline int
ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
        unsigned tmp;

        if (ctx->deadlock_inject_countdown-- == 0) {
                /* grow the injection interval by roughly 3.5x each time */
                tmp = ctx->deadlock_inject_interval;
                if (tmp > UINT_MAX/4)
                        tmp = UINT_MAX;
                else
                        tmp = tmp*2 + tmp + tmp/2;

                ctx->deadlock_inject_interval = tmp;
                ctx->deadlock_inject_countdown = tmp;
                ctx->contending_lock = lock;

                ww_mutex_unlock(lock);

                return -EDEADLK;
        }
#endif

        return 0;
}

int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);
        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();
        ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
                                  0, &ctx->dep_map, _RET_IP_, ctx, 1);

        if (!ret && ctx->acquired > 1)
                return ww_mutex_deadlock_injection(lock, ctx);

        return ret;
}
EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);

#endif
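
/*
 * Release the lock, slowpath:
 */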
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;

        spin_lock_mutex(&lock->wait_lock, flags);
        mutex_release(&lock->dep_map, nested, _RET_IP_);
        debug_mutex_unlock(lock);

        /*
         * Some architectures leave the lock unlocked in the fastpath failure
         * case, others need to leave it locked. In the latter case we have to
         * unlock it here.
         */
        if (__mutex_slowpath_needs_to_unlock())
                atomic_set(&lock->count, 1);

        if (!list_empty(&lock->wait_list)) {
                /* get the first entry from the wait-list: */
                struct mutex_waiter *waiter =
                                list_entry(lock->wait_list.next,
                                           struct mutex_waiter, list);

                debug_mutex_wake_waiter(lock, waiter);

                wake_up_process(waiter->task);
        }

        spin_unlock_mutex(&lock->wait_lock, flags);
}
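
/*
 * Release the lock, slowpath: entry point invoked by the unlock fastpath
 * when the count indicates waiters may be present.
 */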
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
        __mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock);

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock);

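/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock() and return 0 if the mutex has been
 * acquired, sleeping until it becomes available. If a signal arrives
 * while waiting for the lock, this function returns -EINTR without
 * acquiring the mutex.
 */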
int __sched mutex_lock_interruptible(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_interruptible_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_interruptible);

int __sched mutex_lock_killable(struct mutex *lock)
{
        int ret;

        might_sleep();
        ret = __mutex_fastpath_lock_retval(&lock->count);
        if (likely(!ret)) {
                mutex_set_owner(lock);
                return 0;
        } else
                return __mutex_lock_killable_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);

        __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0,
                            NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_killable_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_KILLABLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(struct mutex *lock)
{
        return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, NULL, 0);
}

static noinline int __sched
__ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

static noinline int __sched
__ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
                                       struct ww_acquire_ctx *ctx)
{
        return __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, 0,
                                   NULL, _RET_IP_, ctx, 1);
}

#endif
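
/*
 * Spinlock based trylock, we take the spinlock and check whether we
 * can get the lock:
 */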
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
        struct mutex *lock = container_of(lock_count, struct mutex, count);
        unsigned long flags;
        int prev;

        spin_lock_mutex(&lock->wait_lock, flags);

        prev = atomic_xchg(&lock->count, -1);
        if (likely(prev == 1)) {
                mutex_set_owner(lock);
                mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
        }

        /* Set it back to 0 if there are no waiters: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);

        spin_unlock_mutex(&lock->wait_lock, flags);

        return prev == 1;
}
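
/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */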
int __sched mutex_trylock(struct mutex *lock)
{
        int ret;

        ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
        if (ret)
                mutex_set_owner(lock);

        return ret;
}
EXPORT_SYMBOL(mutex_trylock);

#ifndef CONFIG_DEBUG_LOCK_ALLOC
int __sched
__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock);

int __sched
__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
        int ret;

        might_sleep();

        ret = __mutex_fastpath_lock_retval(&lock->base.count);

        if (likely(!ret)) {
                ww_mutex_set_context_fastpath(lock, ctx);
                mutex_set_owner(&lock->base);
        } else
                ret = __ww_mutex_lock_interruptible_slowpath(lock, ctx);
        return ret;
}
EXPORT_SYMBOL(__ww_mutex_lock_interruptible);

#endif
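
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if we dec to 0, return false otherwise.
 */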
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
        /* dec if we can't possibly hit 0 */
        if (atomic_add_unless(cnt, -1, 1))
                return 0;
        /* we might hit 0, so take the lock */
        mutex_lock(lock);
        if (!atomic_dec_and_test(cnt)) {
                /* when we actually did the dec, we didn't hit 0 */
                mutex_unlock(lock);
                return 0;
        }
        /* we hit 0, and we hold the lock */
        return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);