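// SPDX-License-Identifier: GPL-2.0
/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/locking/rt-mutex-design.txt for details.
 */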
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
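/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 is
 * used to keep track of the "lock has waiters" state:
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
 * we need to set the bit0 before looking at the lock, and the owner may
 * be NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */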
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

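	/*
	 * The rbtree has no waiters enqueued, so if the owner field
	 * still has RT_MUTEX_HAS_WAITERS set it must be cleared. This
	 * is only done here, with wait_lock held, and it uses
	 * READ_ONCE()/WRITE_ONCE() so the lockless cmpxchg() based
	 * fast paths never observe a torn update of lock->owner.
	 */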
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

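/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */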
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

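/*
 * Set the "has waiters" bit in lock->owner so that any fast path cmpxchg()
 * on the lock fails and contenders are forced into the slow path, which
 * serializes on lock->wait_lock.
 */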
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

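/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */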
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

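	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */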
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

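/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */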
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

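/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */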
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

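	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */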
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

static inline int
rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
		      struct rt_mutex_waiter *right)
{
	if (left->prio != right->prio)
		return 0;

	if (dl_prio(left->prio))
		return left->deadline == right->deadline;

	return 1;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

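/*
 * Recalculate @p's effective priority from its top pi waiter, if any, and
 * hand the result to the scheduler via rt_mutex_setprio().
 */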
static void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}

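/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */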
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

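/*
 * Max number of times we'll walk the boosting chain:
 */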
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

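/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */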
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

 retry:
	raw_spin_lock_irq(&task->pi_lock);

	waiter = task->pi_blocked_on;

	if (!waiter)
		goto out_unlock_pi;

	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;

		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	lock = waiter->lock;

	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

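	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */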
	if (!requeue) {
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock(&task->pi_lock);

		next_lock = task_blocked_on_lock(task);

		top_waiter = rt_mutex_top_waiter(lock);

		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	rt_mutex_dequeue(lock, waiter);

	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	rt_mutex_enqueue(lock, waiter);

	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	if (!rt_mutex_owner(lock)) {
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock(&task->pi_lock);

	if (waiter == rt_mutex_top_waiter(lock)) {
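		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner's pi waiters tree with this waiter and
		 * adjust the priority of the owner.
		 */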
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
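		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner's pi waiters tree with the new top (highest
		 * priority) waiter and adjust the priority of the owner.
		 */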
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
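		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */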
	}

	next_lock = task_blocked_on_lock(task);

	top_waiter = rt_mutex_top_waiter(lock);

	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (!next_lock)
		goto out_put_task;

	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}
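/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */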
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

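	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */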
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	if (waiter) {
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		rt_mutex_dequeue(lock, waiter);

	} else {
		if (rt_mutex_has_waiters(lock)) {
			if (!rt_mutex_waiter_less(task_to_waiter(task),
						  rt_mutex_top_waiter(lock)))
				return 0;

		} else {
			goto takeit;
		}
	}

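	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */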
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	return 1;
}
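/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */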
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

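	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */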
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	if (!chain_walk || !next_lock)
		return 0;

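	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */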
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}
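/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */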
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	rt_mutex_dequeue_pi(current, waiter);
	rt_mutex_adjust_prio(current);

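	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */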
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

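	/*
	 * We deboosted before waking the top waiter task such that we don't
	 * run two tasks with the 'same' priority (and ensure the
	 * p->pi_top_task pointer points to a blocked task). This however can
	 * lead to priority inversion if we would get preempted after the
	 * deboost but before waking our donor task, hence the
	 * preempt_disable() before unlock.
	 *
	 * Pairs with preempt_enable() in rt_mutex_postunlock();
	 */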
	preempt_disable();
	wake_q_add(wake_q, waiter->task);
	raw_spin_unlock(&current->pi_lock);
}
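/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. I must
 * have just failed to try_to_take_rt_mutex().
 */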
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;

	lockdep_assert_held(&lock->wait_lock);

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	rt_mutex_adjust_prio(owner);

	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	if (!next_lock)
		return;

	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}

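/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */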
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
	RB_CLEAR_NODE(&waiter->tree_entry);
	waiter->task = NULL;
}

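/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */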
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		if (likely(state == TASK_INTERRUPTIBLE)) {
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

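/*
 * Slow path lock function:
 */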
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	rt_mutex_init_waiter(&waiter);

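	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */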
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	if (unlikely(ret)) {
		__set_current_state(TASK_RUNNING);
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

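	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */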
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	fixup_rt_mutex_waiters(lock);

	return ret;
}

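/*
 * Slow path try-lock function:
 */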
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret;

	if (rt_mutex_owner(lock))
		return 0;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

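/*
 * Slow path to release a rt-mutex.
 *
 * Return whether the current task needs to call rt_mutex_postunlock().
 */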
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

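	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */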
	while (!rt_mutex_has_waiters(lock)) {
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

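	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */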
	mark_wakeup_next_waiter(wake_q, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return true;
}

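/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */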
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return slowfn(lock);
}

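/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */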
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	preempt_enable();
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh))
{
	DEFINE_WAKE_Q(wake_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	if (slowfn(lock, &wake_q))
		rt_mutex_postunlock(&wake_q);
}

static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
{
	might_sleep();

	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
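/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */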
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock(lock, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);

#else
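/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */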
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock(lock, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

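/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */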
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	int ret;

	might_sleep();

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

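/*
 * Futex variant, must not use fastpath.
 */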
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

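/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */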
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	int ret;

	might_sleep();

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				      RT_MUTEX_MIN_CHAINWALK,
				      rt_mutex_slowlock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

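/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context.
 *
 * Returns 1 on success and 0 on contention
 */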
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
		return 0;

	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

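/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */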
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, 1, _RET_IP_);
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

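/*
 * Futex variant, that since futex variants do not use the fast-path, can be
 * simple and will not need to retry.
 */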
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				     struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false;
	}

	mark_wakeup_next_waiter(wake_q, lock);

	return true;
}

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}

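/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */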
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

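/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */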
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
		     struct lock_class_key *key)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;

	if (name && key)
		debug_rt_mutex_init(lock, name, key);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

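/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to serve as a proxy owner
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible. The rtmutex must not be initialized via the normal
 * rt_mutex_init(), and the caller is responsible to enforce this.
 */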
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
}

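/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the proxy owner that acquired the lock via
 *		rt_mutex_init_proxy_locked()
 *
 * Special API call for PI-futex support. Resets the (debugging) state and
 * clears the lock owner. Concurrent operations on the rtmutex are not
 * possible.
 */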
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

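/**
 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
 *
 * NOTE: does _NOT_ remove the @waiter on failure; must either call
 * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */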
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				struct rt_mutex_waiter *waiter,
				struct task_struct *task)
{
	int ret;

	lockdep_assert_held(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
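		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the caller sort it out.
		 */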
		ret = 0;
	}

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

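/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
 * detection. Removes the @waiter on failure.
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */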
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	if (unlikely(ret))
		remove_waiter(lock, waiter);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

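/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */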
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

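/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */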
int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
			     struct hrtimer_sleeper *to,
			     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

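/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
 * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock; we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregards its return value.
 *
 * Special API call for PI-futex support
 */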
bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);

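	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally.
	 */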
	try_to_take_rt_mutex(lock, current, waiter);

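	/*
	 * Unless we're the owner; we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */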
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}

	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}