// SPDX-License-Identifier: GPL-2.0-only
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 * Adaptive Spinlocks:
 *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich
 *				     and Peter Morreale,
 * Adaptive Spinlocks simplification:
 *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
 *
 *  See Documentation/locking/rt-mutex-design.rst for details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/deadline.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>

#include "rtmutex_common.h"

#ifndef WW_RT
# define build_ww_mutex()	(false)
# define ww_container_of(rtm)	NULL

static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
					struct rt_mutex *lock,
					struct ww_acquire_ctx *ww_ctx)
{
	return 0;
}

static inline void __ww_mutex_check_waiters(struct rt_mutex *lock,
					    struct ww_acquire_ctx *ww_ctx)
{
}

static inline void ww_mutex_lock_acquired(struct ww_mutex *lock,
					  struct ww_acquire_ctx *ww_ctx)
{
}

static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
					struct rt_mutex_waiter *waiter,
					struct ww_acquire_ctx *ww_ctx)
{
	return 0;
}

#else
# define build_ww_mutex()	(true)
# define ww_container_of(rtm)	container_of(rtm, struct ww_mutex, base)
# include "ww_mutex.h"
#endif
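
/*
 * This file is not built standalone: rtmutex_api.c (RT_MUTEX_BUILD_MUTEX),
 * spinlock_rt.c (RT_MUTEX_BUILD_SPINLOCKS) and ww_rt_mutex.c (WW_RT) each
 * include it, which is why the ww_mutex hooks above compile to empty stubs
 * unless WW_RT is defined.
 */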

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 is used to
 * keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set the bit0 before looking at the lock, and the owner may
 * be NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
static __always_inline void
rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	WRITE_ONCE(lock->owner, (struct task_struct *)val);
}

static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}
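
/*
 * For reference, the inverse operation is what rt_mutex_owner() in
 * rtmutex_common.h does: mask off the waiters bit before treating the
 * value as a task pointer, e.g.
 *
 *	owner = (unsigned long)READ_ONCE(lock->owner);
 *	return (struct task_struct *)(owner & ~RT_MUTEX_HAS_WAITERS);
 */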

static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

	/*
	 * The rbtree has no waiters enqueued, now make sure that the
	 * lock->owner still has the waiters bit set, otherwise the
	 * following can happen:
	 *
	 * Waiters can enqueue and dequeue on several CPUs. If the last
	 * dequeue empties the wait tree while a stale fixup on another
	 * CPU is about to write back an owner value without the waiters
	 * bit, an unconditional store here could overwrite the NULL
	 * written by a concurrent fast path unlock and resurrect a
	 * stale owner pointer.
	 *
	 * All tasks fiddling with the waiters bit are serialized by
	 * lock->wait_lock, and while the bit is set nothing can change
	 * lock->owner either. Hence the conditional read-modify-write
	 * below is safe: it only clears the bit when the bit is still
	 * set, and a racing fast path cmpxchg() fails as long as the
	 * bit is set.
	 */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_acquire(&lock->owner, &old, new);
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return try_cmpxchg_release(&lock->owner, &old, new);
}

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
static __always_inline bool rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return false;
}

static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
						     struct task_struct *old,
						     struct task_struct *new)
{
	return false;
}

static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static __always_inline bool unlock_rt_mutex_safe(struct rt_mutex_base *lock,
						 unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

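/*
 * All non-RT tasks are squashed to DEFAULT_PRIO below, so SCHED_OTHER
 * nice levels neither influence the waiter ordering nor propagate
 * through the PI chain.
 */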
static __always_inline int __waiter_prio(struct task_struct *task)
{
	int prio = task->prio;

	if (!rt_prio(prio))
		return DEFAULT_PRIO;

	return prio;
}

static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
	waiter->prio = __waiter_prio(task);
	waiter->deadline = task->dl.deadline;
}

/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }

static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
						struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
						 struct rt_mutex_waiter *right)
{
	if (left->prio != right->prio)
		return 0;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 0 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return left->deadline == right->deadline;

	return 1;
}
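
/*
 * Worked example: two SCHED_DEADLINE waiters share the same ->prio, so
 * rt_mutex_waiter_less() falls through to the deadline comparison; a
 * waiter with an absolute deadline of 100us orders before one with
 * 200us because dl_time_before(100us, 200us) is true.
 */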

static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
				  struct rt_mutex_waiter *top_waiter)
{
	if (rt_mutex_waiter_less(waiter, top_waiter))
		return true;

#ifdef RT_MUTEX_BUILD_SPINLOCKS
	/*
	 * Note that RT tasks are excluded from same priority (lateral)
	 * steals to prevent the introduction of an unbounded latency.
	 */
	if (rt_prio(waiter->prio) || dl_prio(waiter->prio))
		return false;

	return rt_mutex_waiter_equal(waiter, top_waiter);
#else
	return false;
#endif
}

#define __node_2_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, tree_entry)

static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b)
{
	struct rt_mutex_waiter *aw = __node_2_waiter(a);
	struct rt_mutex_waiter *bw = __node_2_waiter(b);

	if (rt_mutex_waiter_less(aw, bw))
		return 1;

	if (!build_ww_mutex())
		return 0;

	if (rt_mutex_waiter_less(bw, aw))
		return 0;

	/* NOTE: relies on waiter->ww_ctx being set before insertion */
	if (aw->ww_ctx) {
		if (!bw->ww_ctx)
			return 1;

		return (signed long)(aw->ww_ctx->stamp -
				     bw->ww_ctx->stamp) < 0;
	}

	return 0;
}

static __always_inline void
rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less);
}

static __always_inline void
rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

#define __node_2_pi_waiter(node) \
	rb_entry((node), struct rt_mutex_waiter, pi_tree_entry)

static __always_inline bool
__pi_waiter_less(struct rb_node *a, const struct rb_node *b)
{
	return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b));
}

static __always_inline void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less);
}

static __always_inline void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static __always_inline void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}

/* RT mutex specific wake_q wrappers */
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
						struct rt_mutex_waiter *w)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
		if (IS_ENABLED(CONFIG_PROVE_LOCKING))
			WARN_ON_ONCE(wqh->rtlock_task);
		get_task_struct(w->task);
		wqh->rtlock_task = w->task;
	} else {
		wake_q_add(&wqh->head, w->task);
	}
}
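
/*
 * On PREEMPT_RT, rtlock (spinlock substitution) waiters block in
 * TASK_RTLOCK_WAIT and must be woken with wake_up_state(TASK_RTLOCK_WAIT)
 * instead of via the regular wake_q, hence the separate rtlock_task slot
 * above; only one such wakeup can be pending per rt_wake_q_head.
 */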

static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
		put_task_struct(wqh->rtlock_task);
		wqh->rtlock_task = NULL;
	}

	if (!wake_q_empty(&wqh->head))
		wake_up_q(&wqh->head);

	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
	preempt_enable();
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the chain walk was initiated with RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always conducted
 * for chain walks which are initiated by a waiter.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static __always_inline bool
rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
			      enum rtmutex_chainwalk chwalk)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		return waiter != NULL;
	return chwalk == RT_MUTEX_FULL_CHAINWALK;
}

static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. It is not dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *	again:
 *	  loop_sanity_check();
 *	retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task,
					      enum rtmutex_chainwalk chwalk,
					      struct rt_mutex_base *orig_lock,
					      struct rt_mutex_base *next_lock,
					      struct rt_mutex_waiter *orig_waiter,
					      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex_base *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 */

	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * There could be 'spurious' loops in the lock graph due to ww_mutex,
	 * consider:
	 *
	 *   P1: A, ww_A, ww_B
	 *   P2: ww_B, ww_A
	 *   P3: A
	 *
	 * P3 should not return -EDEADLK because it gets trapped in the cycle
	 * created by P1 and P2 (which will resolve -- and runs into
	 * max_lock_depth above). Therefore disable detect_deadlock such that
	 * the below termination condition can trigger once all relevant
	 * tasks are boosted.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && waiter->ww_ctx && detect_deadlock)
		detect_deadlock = false;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;

		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it is
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		ret = -EDEADLK;

		/*
		 * When the deadlock is due to ww_mutex, don't report the
		 * deadlock here; instead let the ww_mutex wound/die logic
		 * pick which of the contending threads gets -EDEADLK.
		 */
		if (IS_ENABLED(CONFIG_PREEMPT_RT) && orig_waiter && orig_waiter->ww_ctx)
			ret = 0;

		raw_spin_unlock(&lock->wait_lock);
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = get_task_struct(rt_mutex_owner(lock));
		raw_spin_lock(&task->pi_lock);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the dequeue/enqueue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);

	/*
	 * Update the waiter prio fields now that we're dequeued.
	 *
	 * These values can have changed through either:
	 *
	 *   sys_sched_setscheduler() / sys_sched_setattr()
	 *
	 * or
	 *
	 *   DL CBS enforcement advancing the effective deadline.
	 */
	waiter_update_prio(waiter, task);

	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * if we are in the middle of the requeue operation.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_state(waiter->task, waiter->wake_state);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = get_task_struct(rt_mutex_owner(lock));
	raw_spin_lock(&task->pi_lock);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner tasks pi waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}
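
/*
 * Worked example: if T1 blocks on L2 owned by T2, and T2 is itself
 * blocked on L3 owned by T3, boosting T1 walks one (lock, owner) pair
 * per iteration: requeue T1 on L2 and boost T2, then follow
 * T2->pi_blocked_on to L3, requeue T2 there and boost T3. All locks are
 * dropped between iterations, which is why each step revalidates the
 * chain via @next_lock.
 */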

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int __sched
try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task,
		     struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter tree. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		struct rt_mutex_waiter *top_waiter = rt_mutex_top_waiter(lock);

		/*
		 * If waiter is the highest priority waiter of @lock,
		 * or allowed to steal it, take it over.
		 */
		if (waiter == top_waiter || rt_mutex_steal(waiter, top_waiter)) {
			/*
			 * We can acquire the lock. Remove the waiter from the
			 * lock waiters tree.
			 */
			rt_mutex_dequeue(lock, waiter);
		} else {
			return 0;
		}
	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/* Check whether the trylock can steal it. */
			if (!rt_mutex_steal(task_to_waiter(task),
					    rt_mutex_top_waiter(lock)))
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
					   struct rt_mutex_waiter *waiter,
					   struct task_struct *task,
					   struct ww_acquire_ctx *ww_ctx,
					   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex_base *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	waiter->task = task;
	waiter->lock = lock;
	waiter_update_prio(waiter, task);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (build_ww_mutex() && ww_ctx) {
		struct rt_mutex *rtm;

		/* Check whether the waiter should back out immediately */
		rtm = container_of(lock, struct rt_mutex, rtmutex);
		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
		if (res) {
			raw_spin_lock(&task->pi_lock);
			rt_mutex_dequeue(lock, waiter);
			task->pi_blocked_on = NULL;
			raw_spin_unlock(&task->pi_lock);
			return res;
		}
	}

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}

/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
					    struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters and deboost.
	 *
	 * We must in fact deboost here in order to ensure we call
	 * rt_mutex_setprio() to update p->pi_top_task before the
	 * task unblocks.
	 */
	rt_mutex_dequeue_pi(current, waiter);
	rt_mutex_adjust_prio(current);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	/*
	 * We deboosted before waking the top waiter task such that we don't
	 * run two tasks with the 'same' priority (and ensure the
	 * p->pi_top_task pointer points to a blocked task). This however can
	 * lead to priority inversion if we would get preempted after the
	 * deboost but before waking our donor task, hence the
	 * preempt_disable() before unlock.
	 *
	 * Pairs with preempt_enable() in rt_mutex_wake_up_q();
	 */
	preempt_disable();
	rt_mutex_wake_q_add(wqh, waiter);
	raw_spin_unlock(&current->pi_lock);
}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(lock);
}

/*
 * Slow path to release a rt-mutex.
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(&wqh, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	rt_mutex_slowunlock(lock);
}
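
/*
 * The release semantics of the fast path cmpxchg above pair with the
 * acquire semantics of rt_mutex_cmpxchg_acquire() in the lock path,
 * which orders the critical section against the next acquisition of
 * the lock.
 */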

#ifdef CONFIG_SMP
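/*
 * Note that @owner is a speculative pointer here: dereferencing it is
 * only safe under rcu_read_lock() and while the check against the
 * current lock owner still succeeds.
 */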
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	bool res = true;

	rcu_read_lock();
	for (;;) {
		/* If owner changed, trylock again. */
		if (owner != rt_mutex_owner(lock))
			break;
		/*
		 * Ensure that @owner is dereferenced after checking that
		 * the lock owner still matches @owner. If that fails,
		 * @owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Stop spinning when:
		 *  - the lock owner has been scheduled out
		 *  - current is no longer the top waiter
		 *  - current is requeued due to vcpu preemption
		 */
		if (!owner->on_cpu || need_resched() ||
		    !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
		    vcpu_is_preempted(task_cpu(owner))) {
			res = false;
			break;
		}
		cpu_relax();
	}
	rcu_read_unlock();
	return res;
}
#else
static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter,
				  struct task_struct *owner)
{
	return false;
}
#endif

#ifdef RT_MUTEX_BUILD_MUTEX
/*
 * Functions required for:
 *	- rtmutex, futex on all kernels
 *	- mutex and rwsem substitutions on RT kernels
 */

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. It
 * must have just failed to try_to_take_rt_mutex().
 */
static void __sched remove_waiter(struct rt_mutex_base *lock,
				  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_base *next_lock;

	lockdep_assert_held(&lock->wait_lock);

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}

/**
 * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @ww_ctx:		 WW mutex context pointer
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
					   struct ww_acquire_ctx *ww_ctx,
					   unsigned int state,
					   struct hrtimer_sleeper *timeout,
					   struct rt_mutex_waiter *waiter)
{
	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
	struct task_struct *owner;
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		if (timeout && !timeout->task) {
			ret = -ETIMEDOUT;
			break;
		}
		if (signal_pending_state(state, current)) {
			ret = -EINTR;
			break;
		}

		if (build_ww_mutex() && ww_ctx) {
			ret = __ww_mutex_check_kill(rtm, waiter, ww_ctx);
			if (ret)
				break;
		}

		if (waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
			schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void __sched rt_mutex_handle_deadlock(int res, int detect_deadlock,
					     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	if (build_ww_mutex() && w->ww_ctx)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	WARN(1, "rtmutex deadlock detected\n");
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/**
 * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
 * @lock:	The rtmutex to block lock
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 * @chwalk:	Indicator whether full or partial chainwalk is requested
 * @waiter:	Initializer waiter for blocking
 */
static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
				       struct ww_acquire_ctx *ww_ctx,
				       unsigned int state,
				       enum rtmutex_chainwalk chwalk,
				       struct rt_mutex_waiter *waiter)
{
	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
	struct ww_mutex *ww = ww_container_of(rtm);
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		if (build_ww_mutex() && ww_ctx) {
			__ww_mutex_check_waiters(rtm, ww_ctx);
			ww_mutex_lock_acquired(ww, ww_ctx);
		}
		return 0;
	}

	set_current_state(state);

	ret = task_blocks_on_rt_mutex(lock, waiter, current, ww_ctx, chwalk);
	if (likely(!ret))
		ret = rt_mutex_slowlock_block(lock, ww_ctx, state, NULL, waiter);

	if (likely(!ret)) {
		/* acquired the lock */
		if (build_ww_mutex() && ww_ctx) {
			if (!ww_ctx->is_wait_die)
				__ww_mutex_check_waiters(rtm, ww_ctx);
			ww_mutex_lock_acquired(ww, ww_ctx);
		}
	} else {
		__set_current_state(TASK_RUNNING);
		remove_waiter(lock, waiter);
		rt_mutex_handle_deadlock(ret, chwalk, waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	return ret;
}

static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
					     struct ww_acquire_ctx *ww_ctx,
					     unsigned int state)
{
	struct rt_mutex_waiter waiter;
	int ret;

	rt_mutex_init_waiter(&waiter);
	waiter.ww_ctx = ww_ctx;

	ret = __rt_mutex_slowlock(lock, ww_ctx, state, RT_MUTEX_MIN_CHAINWALK,
				  &waiter);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}

/*
 * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
 * @lock:	The rtmutex to block lock
 * @ww_ctx:	WW mutex context pointer
 * @state:	The task state for sleeping
 */
static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
				     struct ww_acquire_ctx *ww_ctx,
				     unsigned int state)
{
	unsigned long flags;
	int ret;

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	ret = __rt_mutex_slowlock_locked(lock, ww_ctx, state);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
					   unsigned int state)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return rt_mutex_slowlock(lock, NULL, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */

#ifdef RT_MUTEX_BUILD_SPINLOCKS
/*
 * Functions required for spin/rw_lock substitution on RT kernels
 */

/**
 * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
 * @lock:	The underlying RT mutex
 */
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
	struct rt_mutex_waiter waiter;
	struct task_struct *owner;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, current, NULL))
		return;

	rt_mutex_init_rtlock_waiter(&waiter);

	/* Save current state and set it to TASK_RTLOCK_WAIT */
	current_save_and_set_rtlock_wait_state();

	task_blocks_on_rt_mutex(lock, &waiter, current, NULL, RT_MUTEX_MIN_CHAINWALK);

	for (;;) {
		/* Try to acquire the lock again */
		if (try_to_take_rt_mutex(lock, current, &waiter))
			break;

		if (&waiter == rt_mutex_top_waiter(lock))
			owner = rt_mutex_owner(lock);
		else
			owner = NULL;
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
			schedule_rtlock();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}

	/* Restore the task state */
	current_restore_rtlock_saved_state();

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up:
	 */
	fixup_rt_mutex_waiters(lock);
	debug_rt_mutex_free_waiter(&waiter);
}

static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	rtlock_slowlock_locked(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
}

#endif /* RT_MUTEX_BUILD_SPINLOCKS */