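/*
 * RT-Mutexes: blocking mutual exclusion locks with priority inheritance
 * (PI) support, shared by the core kernel locking code and the PI-futex
 * implementation.
 */
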
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
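
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the current owner. Bit 0
 * of that pointer (RT_MUTEX_HAS_WAITERS) records whether waiters are
 * enqueued on the lock, so the owner and the waiter state can be updated
 * together with a single cmpxchg() on the fast path.
 */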
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

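	/*
	 * The waiter tree is empty, so clear RT_MUTEX_HAS_WAITERS in
	 * lock->owner and let the lockless fast path operate on the lock
	 * again. READ_ONCE()/WRITE_ONCE() avoid load/store tearing against
	 * the cmpxchg() based fast path.
	 */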
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
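
/*
 * Without CONFIG_DEBUG_RT_MUTEXES the lock acquisition and release fast
 * paths use cmpxchg() on lock->owner directly. With debugging enabled the
 * cmpxchg() helpers evaluate to 0 and every operation is forced into the
 * slow path.
 */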
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Set RT_MUTEX_HAS_WAITERS in lock->owner atomically. Callers hold
 * lock->wait_lock; the bit forces the owner into the slow unlock path, so
 * the lock cannot be released via the fast path behind our back.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}
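
/*
 * Safe fast path aware unlock: drop the owner with a release cmpxchg().
 * Returns true if the unlock succeeded, false if the lock has (or just
 * got) waiters and the caller must fall back to the slow path.
 */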
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/*
	 * If a waiter blocked on the lock after we dropped wait_lock, it has
	 * set RT_MUTEX_HAS_WAITERS again and the cmpxchg() below fails; the
	 * caller then has to take the slow unlock path.
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

/*
 * Build a waiter on the stack from @p's current prio/deadline; only used
 * as a comparison argument for rt_mutex_waiter_less()/_equal().
 */
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), compare the deadlines of the
	 * associated tasks: if the left waiter has dl_prio() and we did
	 * not return 1 above, the right waiter has dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

static inline int
rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
		      struct rt_mutex_waiter *right)
{
	if (left->prio != right->prio)
		return 0;

	/* Same dl_prio() handling as rt_mutex_waiter_less() above. */
	if (dl_prio(left->prio))
		return left->deadline == right->deadline;

	return 1;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color_cached(&waiter->tree_entry, &lock->waiters, leftmost);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	rb_erase_cached(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	bool leftmost = true;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color_cached(&waiter->pi_tree_entry, &task->pi_waiters, leftmost);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}
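
/*
 * Deadlock detection is conditional: it is only carried out for a full
 * chain walk (RT_MUTEX_FULL_CHAINWALK); with CONFIG_DEBUG_RT_MUTEXES the
 * decision is delegated to the debug code.
 */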
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Maximum number of lock chain links we are willing to walk before we
 * give up and return -EDEADLK:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
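
/*
 * Adjust the priority chain. Walks the chain of owners and waiters,
 * requeueing waiters and propagating priority (or deadline) changes from
 * @task along the chain until it ends or no further adjustment is needed,
 * optionally doing deadlock detection on the way.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed; the caller holds a reference on it
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just had its priority adjusted and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated its
 *		priority to @task
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */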
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach: we hold at most two
	 * locks per step and drop them before the next one, so everything
	 * can change under us and has to be rechecked.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task, so the chain state can have changed since the caller
	 * (or we ourselves, via the retry/again paths) dropped the locks.
	 */
 retry:
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * Get the waiter on which @task is blocked.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * Check whether the end of the boosting chain has been reached or
	 * the state of the chain has changed while we dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * it completely and blocks now on an unrelated lock or on
	 * @orig_lock. We stored the lock on which @task was blocked in
	 * @next_lock, so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note: top_waiter can
	 * be NULL, when we are in the deboosting mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we are
		 * not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority then
	 * there is no further priority adjustment necessary. If deadlock
	 * detection is on, we continue the chain walk without requeueing.
	 */
	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * Get the next lock. We have to trylock here as we are holding
	 * task->pi_lock, which is the reverse lock order versus the
	 * other rtmutex operations.
	 */
	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. Just do the minimum
	 * chain walk checks.
	 */
	if (!requeue) {
		/* Release @task */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/* If there is no owner of the lock, end of chain. */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* Grab the next task, i.e. the owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock(&task->pi_lock);

		/*
		 * Store whether the owner is blocked itself and the top
		 * waiter for the next iteration. The decision is made
		 * after dropping the locks.
		 */
		next_lock = task_blocked_on_lock(task);

		top_waiter = rt_mutex_top_waiter(lock);

		/* Drop the locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If the owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);

	/*
	 * Update the waiter prio fields now that we're dequeued.
	 */
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	rt_mutex_enqueue(lock, waiter);

	/* Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * We must abort the chain walk if there is no lock owner even
	 * in the deadlock detection case, as we have nothing to follow
	 * here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we
		 * need to wake the new top waiter up to try to get the
		 * lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock(&task->pi_lock);

	/* Requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is no
		 * longer the top priority waiter. Replace waiter in the
		 * owner tasks pi waiters tree with the new top (highest
		 * priority) waiter and adjust the priority of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and we
		 * continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}
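
/*
 * Try to take an rt-mutex.
 *
 * Must be called with lock->wait_lock held and interrupts disabled.
 *
 * @lock:   the lock to be acquired
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait tree if the caller
 *	    is already enqueued (slow path with a pre-set-up waiter),
 *	    otherwise NULL
 *
 * Returns 1 if the lock was acquired, 0 otherwise.
 */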
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Before testing whether we can acquire @lock, set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all other
	 * tasks which try to modify @lock into the slow path, where they
	 * serialize on @lock->wait_lock.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter into
	 * the @lock waiter tree. If @waiter == NULL this is a trylock
	 * attempt.
	 */
	if (waiter) {
		/*
		 * If the waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiter tree.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * Trylock path: if the lock has waiters already, check
		 * whether @task is eligible to take over the lock. If there
		 * are no other waiters, @task can acquire it directly.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task is not strictly higher priority than the
			 * top waiter (kernel view), @task lost.
			 */
			if (!rt_mutex_waiter_less(task_to_waiter(task),
						  rt_mutex_top_waiter(lock)))
				return 0;

			/*
			 * The current top waiter stays enqueued; nothing to
			 * change in the lock waiter order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the pi_lock
			 * dance; @task->pi_blocked_on is NULL and there is
			 * nothing to enqueue in @task's pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant for the @waiter == NULL case, but a
	 * redundant store is cheaper than a conditional.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If other
	 * waiters exist, insert the highest priority waiter into
	 * @task->pi_waiters.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	return 1;
}
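
/*
 * Task blocks on lock: prepare the waiter, enqueue it and, if required,
 * propagate the priority change down the PI chain.
 *
 * Must be called with lock->wait_lock held and interrupts disabled.
 */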
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization: we drop the locks, so another waiter
	 * could come in before the chain walk detects the deadlock and
	 * would then wrongly report -EDEADLOCK although it is not in a
	 * deadlock situation itself.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, there is no chain to walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock, so the owner
	 * struct is protected by wait_lock. The reference gets dropped in
	 * rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}
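
/*
 * Remove the top waiter from the current task's pi waiter tree and
 * queue it up for wakeup.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */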
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters and deboost. We must deboost
	 * here to ensure rt_mutex_setprio() updates p->pi_top_task before
	 * the task unblocks.
	 */
	rt_mutex_dequeue_pi(current, waiter);
	rt_mutex_adjust_prio(current);

	/*
	 * As we are waking up the top waiter, and the waiter stays queued
	 * on the lock until it gets the lock, this lock obviously has
	 * waiters. Just set the bit here; this also forces all new tasks
	 * into the slow path, making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	/*
	 * We deboosted before waking the top waiter task so that we don't
	 * run two tasks with the 'same' priority (and ensure the
	 * p->pi_top_task pointer points to a blocked task). This however
	 * can lead to priority inversion if we would get preempted after
	 * the deboost but before waking our donor task, hence the
	 * preempt_disable() before the unlock.
	 *
	 * Pairs with preempt_enable() in rt_mutex_postunlock().
	 */
	preempt_disable();
	wake_q_add(wake_q, waiter->task);
	raw_spin_unlock(&current->pi_lock);
}
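
/*
 * Remove a waiter from a lock and give up.
 *
 * Must be called with lock->wait_lock held and interrupts disabled, by the
 * task that has just failed try_to_take_rt_mutex().
 */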
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;

	lockdep_assert_held(&lock->wait_lock);

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	/*
	 * Don't walk the chain if the owner task is not blocked itself.
	 */
	if (!next_lock)
		return;

	/* Reference gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}
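
/*
 * Recheck the pi chain, in case we got a priority setting.
 *
 * Called from sched_setscheduler().
 */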
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* Reference gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
	RB_CLEAR_NODE(&waiter->tree_entry);
	waiter->task = NULL;
}
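
/*
 * __rt_mutex_slowlock() - perform the wait-wake-try-to-take loop
 *
 * @lock:	the rt_mutex to take
 * @state:	the state the task should block in (TASK_INTERRUPTIBLE or
 *		TASK_UNINTERRUPTIBLE)
 * @timeout:	the pre-initialized and started timer, or NULL for none
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled.
 */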
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (likely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	rt_mutex_init_waiter(&waiter);

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support), in which case we cannot
	 * unconditionally enable interrupts. Use the irqsave/restore
	 * variants instead.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		/* sleep on the mutex */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	if (unlikely(ret)) {
		__set_current_state(TASK_RUNNING);
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

static inline int __rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking @lock->wait_lock as it is
	 * only read here, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. irqsave is used to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

/*
 * Slow path to release a rt-mutex.
 *
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q)
{
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * If the fast lock path is enabled, a new waiter can set
	 * RT_MUTEX_HAS_WAITERS the moment we drop wait_lock, so the owner
	 * can only be cleared while verifying that the waiter bit is still
	 * unset. unlock_rt_mutex_safe() does that with a cmpxchg(); if it
	 * fails we re-take wait_lock and either hand the lock to the new
	 * top waiter or retry.
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup-next-waiter path does not suffer from the above race;
	 * see the comments in mark_wakeup_next_waiter().
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(wake_q, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return true;
}

/*
 * debug-aware fast / slowpath lock, trylock and unlock helpers:
 * take the fast path only if the cmpxchg() on lock->owner succeeds,
 * otherwise fall back to the supplied slow path function.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return slowfn(lock);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
	preempt_enable();
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh))
{
	DEFINE_WAKE_Q(wake_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	if (slowfn(lock, &wake_q))
		rt_mutex_postunlock(&wake_q);
}

static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
{
	might_sleep();

	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock_nested - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 * @subclass: the lockdep subclass
 */
void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
{
	__rt_mutex_lock(lock, subclass);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
#endif

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	__rt_mutex_lock(lock, 0);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
#endif

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	int ret;

	might_sleep();

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/*
 * Futex variant, must not use the fast path.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_slowtrylock(lock);
}

int __sched __rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return __rt_mutex_slowtrylock(lock);
}

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *			 provided by the caller
 *
 * @lock:	the rt_mutex to be locked
 * @timeout:	timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	int ret;

	might_sleep();

	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				      RT_MUTEX_MIN_CHAINWALK,
				      rt_mutex_slowlock);
	if (ret)
		mutex_release(&lock->dep_map, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to call it
 * from atomic regions, but not from hard or soft interrupt context, which
 * the WARN_ON_ONCE() below enforces.
 *
 * Returns 1 on success and 0 on contention.
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	int ret;

	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
		return 0;

	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
	if (ret)
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);

	return ret;
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	mutex_release(&lock->dep_map, 1, _RET_IP_);
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/*
 * Futex variant: since the futex code never uses the fast path, this can
 * be simple and does not need to retry.
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				     struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false;
	}

	/*
	 * mark_wakeup_next_waiter() deboosts and retains preemption
	 * disabled when we drop the wait_lock, to avoid inversion prior
	 * to the wakeup. The preempt_disable() therein pairs with
	 * rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	return true;
}

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	unsigned long flags;
	bool postunlock;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt_mutex
 *
 * @lock:	the rt_mutex to be initialized
 * @name:	the lock name for debugging
 * @key:	the lock class key for debugging
 *
 * Initialize the rt_mutex to the unlocked state.
 *
 * Initializing of a locked rt_mutex is not allowed.
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name,
		     struct lock_class_key *key)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;

	if (name && key)
		debug_rt_mutex_init(lock, name, key);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);

/*
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of
 * @proxy_owner.
 *
 * No locking; the caller has to serialize itself. Special API call for the
 * PI-futex support: the rtmutex is initialized and immediately assigned to
 * @proxy_owner.
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
}

/*
 * rt_mutex_proxy_unlock - release a lock that was acquired on behalf of
 * @proxy_owner via rt_mutex_init_proxy_locked().
 *
 * No locking; the caller has to serialize itself. Special API call for the
 * PI-futex support.
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
				struct rt_mutex_waiter *waiter,
				struct task_struct *task)
{
	int ret;

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have returned with
		 * -EDEADLK and the owner released the lock while we
		 * were walking the pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 *
 * @lock:	the rt_mutex to take
 * @waiter:	the pre-initialized rt_mutex_waiter
 * @task:	the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for PI-futex support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL.
 *
 * The caller has to serialize against other accessors to the lock itself.
 *
 * Special API call for PI-futex support.
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 *
 * @lock:	the rt_mutex we were woken on
 * @to:		the timeout, NULL if none; the hrtimer should already have
 *		been started
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support.
 */
int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
			     struct hrtimer_sleeper *to,
			     struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 *
 * @lock:	the rt_mutex we were woken on
 * @waiter:	the pre-initialized rt_mutex_waiter
 *
 * Clean up after a failed rt_mutex_wait_proxy_lock(). Unless we acquired
 * the lock, we are still enqueued on the wait tree and can still be granted
 * ownership until we are removed; so we may find that we are in fact the
 * owner and must disregard the wait failure.
 *
 * Returns:
 *  true  - did the cleanup
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *	    the caller should disregard its return value
 *
 * Special API call for PI-futex support.
 */
bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock; this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * The return value is not interesting: the rt_mutex_owner() check
	 * below infers it. If the trylock succeeded, we are the owner and
	 * no cleanup is required.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we are the owner, we are still enqueued on the wait tree.
	 * If we did not become the owner, take us off the wait tree.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}