/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * See Documentation/locking/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0 is used
 * to keep track of the "lock has waiters" state (RT_MUTEX_HAS_WAITERS):
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 */
static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}
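
/*
 * Example of the owner field encoding (illustrative pointer value, not
 * taken from a real kernel): with RT_MUTEX_HAS_WAITERS == 1, an owner
 * pointer of 0xffff880012345678 is stored as 0xffff880012345679 while
 * waiters are queued; clear_rt_mutex_waiters() masks bit 0 off again.
 * This works because task_struct pointers are always word aligned.
 */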

/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold ->wait_lock. Setting RT_MUTEX_HAS_WAITERS forces all
 * future lockers into the slow path, where they serialize on ->wait_lock,
 * so relaxed ordering suffices here.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->task->dl.deadline,
				      right->task->dl.deadline);

	return 0;
}
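
/*
 * Worked example (illustrative values): prio numbers are inverted, so an
 * RT waiter with prio 10 orders before one with prio 40. If both waiters
 * are SCHED_DEADLINE tasks, both share the same (negative) dl prio and
 * the tie is broken by the earlier absolute deadline via dl_time_before().
 */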

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

/*
 * Calculate task priority from the waiter tree priority
 *
 * Return task->normal_prio when the waiter tree is empty or when
 * the waiter is not allowed to do priority boosting
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->prio,
		   task->normal_prio);
}

struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return NULL;

	return task_top_pi_waiter(task)->task;
}

/*
 * Called by sched_setscheduler() to get the priority which will be
 * effective after the change.
 */
int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
{
	if (!task_has_pi_waiters(task))
		return newprio;

	if (task_top_pi_waiter(task)->task->prio <= newprio)
		return task_top_pi_waiter(task)->task->prio;
	return newprio;
}
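
/*
 * Example (illustrative numbers): if the task's top pi waiter currently
 * boosts it to prio 10 and sched_setscheduler() asks for newprio 20, the
 * effective priority stays at the boosted 10; a newprio of 5 takes
 * effect directly.
 */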

/*
 * Adjust the priority of a task, after its pi_waiters got modified.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio || dl_prio(prio))
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: We do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task.)
 */
void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper for debug_rt_mutex_detect_deadlock(),
	 * kept here so the conditional deadlock detection policy is
	 * documented in the main source file rather than the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. It is not dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * The chain walk never holds more than two locks at a time: the pi_lock
 * of the task whose waiter is inspected and the wait_lock of the rtmutex
 * it blocks on. Every step drops both before moving to the next link in
 * the chain, so all exit conditions have to be re-checked after the
 * locks are reacquired.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 *
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary. If
	 * deadlock detection is off, we stop the chain walk. If it is
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (waiter->prio == task->prio) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock(&task->pi_lock);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);
	waiter->prio = task->prio;
	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * if we are in the deadlock detection mode, as we have nothing
	 * to follow here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock(&task->pi_lock);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner tasks pi waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so we can
		 * wake it up.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		__rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocks_on_rt_mutex(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, it came in via the slowpath and is
	 * already enqueued in the lock waiter tree. If @waiter == NULL
	 * this is a trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters tree.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already we check whether @task
		 * is eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire the
		 * lock. @task->pi_blocked_on is NULL, so it does not
		 * need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter's priority, @task lost.
			 */
			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in the
			 * @task pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}

/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute wakeup in the
	 * boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	rt_mutex_dequeue_pi(current, waiter);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	raw_spin_unlock(&current->pi_lock);

	wake_q_add(wake_q, waiter->task);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled,
 * after the caller failed to acquire the lock.
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	__rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || (waiter->prio == task->prio &&
			!dl_prio(task->prio))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);
	RB_CLEAR_NODE(&waiter.pi_tree_entry);
	RB_CLEAR_NODE(&waiter.tree_entry);

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		/* sleep on the mutex */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	if (unlikely(ret)) {
		__set_current_state(TASK_RUNNING);
		if (rt_mutex_has_waiters(lock))
			remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

/*
 * Slow path to release a rt-mutex.
 * Return whether the current task needs to undo a potential priority boosting.
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q)
{
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* check PI boosting */
	return true;
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh))
{
	WAKE_Q(wake_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
		rt_mutex_deadlock_account_unlock(current);

	} else {
		bool deboost = slowfn(lock, &wake_q);

		wake_up_q(&wake_q);

		/* Undo pi boosting if necessary: */
		if (deboost)
			rt_mutex_adjust_prio(current);
	}
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
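
/*
 * Minimal usage sketch (illustrative only, not part of this file; the
 * lock name is made up). Callers define an rt_mutex and use the
 * lock/trylock/unlock entry points from thread context:
 *
 *	static DEFINE_RT_MUTEX(example_lock);
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section, with PI boosting of the owner ...
 *	rt_mutex_unlock(&example_lock);
 *
 *	if (rt_mutex_trylock(&example_lock)) {
 *		... opportunistic critical section ...
 *		rt_mutex_unlock(&example_lock);
 *	}
 */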

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/*
 * Futex variant with full deadlock detection.
 */
int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
			      struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_FULL_CHAINWALK,
				       rt_mutex_slowlock);
}

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			 the timeout structure is provided
 *			 by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_MIN_CHAINWALK,
				       rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to
 * call it from atomic regions, but not from hard interrupt or soft
 * interrupt context.
 *
 * Returns 1 on success and 0 on contention.
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
		return 0;

	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
 * @lock:	the rt_mutex to be unlocked
 * @wqh:	wake queue head on which the woken-up top waiter is queued
 *
 * Returns: true/false indicating whether priority adjustment is needed.
 */
bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
				   struct wake_q_head *wqh)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
		rt_mutex_deadlock_account_unlock(current);
		return false;
	}
	return rt_mutex_slowunlock(lock, wqh);
}

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
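
/*
 * Initialization sketch (illustrative; the variable name is made up).
 * Dynamically allocated locks usually go through the rt_mutex_init()
 * wrapper, which supplies the lock name for the debug case:
 *
 *	struct rt_mutex *m = kmalloc(sizeof(*m), GFP_KERNEL);
 *
 *	if (m)
 *		rt_mutex_init(m);
 */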

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 * @proxy_owner:the task that currently owns the lock by proxy
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock_irq(&lock->wait_lock);
		return 1;
	}

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let's retry the lock here.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock_irq(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Complete the lock acquisition started on our behalf by another task.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	/* sleep on the mutex */
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}