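/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 */
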
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
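
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state:
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 */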
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
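
/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */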
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
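
/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */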
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
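	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 *					wake waiter();
	 *					unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */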
	return rt_mutex_cmpxchg(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock(&lock->wait_lock);
	return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return (left->task->dl.deadline < right->task->dl.deadline);

	return 0;
}

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
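
/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */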
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}
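
/*
 * Called by sched_setscheduler() to check whether the priority change
 * is overruled by a possible priority boosting.
 */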
int rt_mutex_check_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return 0;

	return task_top_pi_waiter(task)->task->prio <= newprio;
}
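
/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */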
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}
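
/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task. We do not use the spin_xx_mutex() variants here as we are
 * outside of the debug path.)
 */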
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
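
/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */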
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}
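
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 */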
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	unsigned long flags;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
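
	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */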
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message too.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}
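
	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */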
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 *
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;

		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it's
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (waiter->prio == task->prio) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;

	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		raw_spin_unlock(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 * If there is no owner of the lock, end of chain.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner tasks pi waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 *
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
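
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */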
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	unsigned long flags;
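
	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */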
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter tree. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters tree.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock. @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter priority (kernel view),
			 * @task lost.
			 */
			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
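
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */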
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;
	unsigned long flags;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
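
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current tasks pi waiter tree and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */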
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * It's safe to dereference waiter as it cannot go away as
	 * long as we hold lock->wait_lock. The waiter task needs to
	 * acquire it in order to dequeue the waiter.
	 */
	wake_up_process(waiter->task);
}
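
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and
 * have just failed to try_to_take_rt_mutex().
 */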
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock_irqsave(&owner->pi_lock, flags);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	__rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
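
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */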
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}
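
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */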
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret)) {
		remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and
	 * try to acquire the lock.
	 */
	raw_spin_lock(&lock->wait_lock);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}

/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/*
	 * We must be careful here if the fast lock path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock) == true)
			return;
		/* Relock the rtmutex wait_lock */
		raw_spin_lock(&lock->wait_lock);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 */
	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
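
/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */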
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
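
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */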
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
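
/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */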
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/*
 * Futex variant with full deadlock detection.
 */
int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
			      struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_FULL_CHAINWALK,
				       rt_mutex_slowlock);
}
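
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */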
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_MIN_CHAINWALK,
				       rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
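
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */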
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
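
/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */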
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
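
/*
 * Illustrative usage sketch (added for exposition, not part of the
 * original source; "my_lock" is a hypothetical example). A statically
 * initialized rt_mutex taken and released from process context:
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section, protected with priority inheritance ...
 *	rt_mutex_unlock(&my_lock);
 */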
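
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */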
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);
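
/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */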
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
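
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */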
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}
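
/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be locked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */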
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
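
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */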
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
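
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */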
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}
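
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Complete the lock acquisition started on our behalf by another task.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex requeue support
 */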
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	/* sleep on the mutex */
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}