/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * See Documentation/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include "rtmutex_common.h"
/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state:
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *			is going to take the lock
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters
 *
 * The fast atomic compare-and-exchange based acquire and release is
 * only possible when bit 0 of lock->owner is 0.
 */

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		clear_rt_mutex_waiters(lock);
}

/*
 * We can speed up the acquire/release, if the architecture
 * supports cmpxchg and if there's no debugging state to be set up.
 */
#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
# define rt_mutex_cmpxchg(l,c,n)	(cmpxchg(&l->owner, c, n) == c)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
}
#else
# define rt_mutex_cmpxchg(l,c,n)	(0)
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}
#endif
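/*
 * Note on the fast-path/slow-path interaction (summary, not from the
 * original comments): a slow-path acquirer must call
 * mark_rt_mutex_waiters() *before* testing rt_mutex_owner(). Once
 * bit 0 is set, a concurrent fast-path unlock,
 * cmpxchg(&lock->owner, current, NULL), fails because the stored
 * owner value no longer compares equal to the bare task pointer, so
 * the releasing task is forced into the slow path under ->wait_lock.
 */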
/*
 * Calculate task priority from the waiter list priority.
 *
 * Return task->normal_prio when the waiter list is empty or when
 * the waiter is not allowed to do priority boosting.
 */
int rt_mutex_getprio(struct task_struct *task)
{
	if (likely(!task_has_pi_waiters(task)))
		return task->normal_prio;

	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
		   task->normal_prio);
}

/*
 * Adjust the priority of a task, after its pi_waiters list changed.
 *
 * This can be both boosting and unboosting. task->pi_lock must be held.
 */
static void __rt_mutex_adjust_prio(struct task_struct *task)
{
	int prio = rt_mutex_getprio(task);

	if (task->prio != prio)
		rt_mutex_setprio(task, prio);
}

/*
 * Adjust task priority (undo boosting). Called from the exit path of
 * rt_mutex_slowunlock() and rt_mutex_slowlock().
 *
 * (Note: we do this outside of the protection of lock->wait_lock to
 * allow the lock to be taken while or before we readjust the priority
 * of task.)
 */
static void rt_mutex_adjust_prio(struct task_struct *task)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
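/*
 * Illustrative scenario (not from the original source): high-prio
 * task A blocks on lock L1 owned by B, while B itself sits blocked
 * on lock L2 owned by low-prio task C. task_blocks_on_rt_mutex()
 * boosts B directly and then starts the chain walk below on B: the
 * walk requeues B's waiter on L2 with B's boosted priority, follows
 * the lock to its owner C, and boosts C in turn. The walk ends at a
 * lock with no owner, when no further adjustment is needed (unless
 * deadlock detection is on), or after max_lock_depth iterations.
 */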
/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 * Returns 0 or -EDEADLK.
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      int deadlock_detect,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex *lock;
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	int detect_deadlock, ret = 0, depth = 0;
	unsigned long flags;

	detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
							 deadlock_detect);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold
	 * a maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return deadlock_detect ? -EDEADLK : 0;
	}
 retry:
	/*
	 * Task can not go away as we did a get_task() before!
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter && (!task_has_pi_waiters(task) ||
			   top_waiter != task_top_pi_waiter(task)))
		goto out_unlock_pi;

	/*
	 * When deadlock detection is off then we check, if further
	 * priority adjustment is necessary.
	 */
	if (!detect_deadlock && waiter->list_entry.prio == task->prio)
		goto out_unlock_pi;

	lock = waiter->lock;
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		cpu_relax();
		goto retry;
	}

	/* Deadlock detection */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = deadlock_detect ? -EDEADLK : 0;
		goto out_unlock_pi;
	}

	top_waiter = rt_mutex_top_waiter(lock);

	/* Requeue the waiter */
	plist_del(&waiter->list_entry, &lock->wait_list);
	waiter->list_entry.prio = task->prio;
	plist_add(&waiter->list_entry, &lock->wait_list);

	/* Release the task */
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue above changed the top waiter, then we need
		 * to wake the new top waiter up to try to get the lock.
		 */
		if (top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock(&lock->wait_lock);
		goto out_put_task;
	}
	put_task_struct(task);

	/* Grab the next task */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock_irqsave(&task->pi_lock, flags);

	if (waiter == rt_mutex_top_waiter(lock)) {
		/* Boost the owner */
		plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);

	} else if (top_waiter == waiter) {
		/* Deboost the owner */
		plist_del(&waiter->pi_list_entry, &task->pi_waiters);
		waiter = rt_mutex_top_waiter(lock);
		waiter->pi_list_entry.prio = waiter->list_entry.prio;
		plist_add(&waiter->pi_list_entry, &task->pi_waiters);
		__rt_mutex_adjust_prio(task);
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	top_waiter = rt_mutex_top_waiter(lock);
	raw_spin_unlock(&lock->wait_lock);

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * we can stop the chain walk here, unless we are in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 out_put_task:
	put_task_struct(task);

	return ret;
}
/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held.
 *
 * @lock:   the lock to be acquired.
 * @task:   the task which wants to acquire the lock
 * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	/*
	 * The atomic acquisition of the lock happens via a cmpxchg on
	 * lock->owner, which only succeeds while the "has waiters" bit
	 * is clear. Set the bit unconditionally here, which forces all
	 * contenders into this slow path, where the decision is taken
	 * under lock->wait_lock.
	 */
	mark_rt_mutex_waiters(lock);

	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The task will get the lock because of one of these conditions:
	 * 1) there is no waiter
	 * 2) it has higher priority than all waiters
	 * 3) it is the top waiter
	 */
	if (rt_mutex_has_waiters(lock)) {
		if (task->prio >= rt_mutex_top_waiter(lock)->list_entry.prio) {
			if (!waiter || waiter != rt_mutex_top_waiter(lock))
				return 0;
		}
	}

	if (waiter || rt_mutex_has_waiters(lock)) {
		unsigned long flags;
		struct rt_mutex_waiter *top;

		raw_spin_lock_irqsave(&task->pi_lock, flags);

		/* Remove the queued waiter. */
		if (waiter) {
			plist_del(&waiter->list_entry, &lock->wait_list);
			task->pi_blocked_on = NULL;
		}

		/*
		 * We have to enqueue the top waiter (if it exists) into
		 * task->pi_waiters list.
		 */
		if (rt_mutex_has_waiters(lock)) {
			top = rt_mutex_top_waiter(lock);
			top->pi_list_entry.prio = top->list_entry.prio;
			plist_add(&top->pi_list_entry, &task->pi_waiters);
		}
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
	}

	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	rt_mutex_set_owner(lock, task);

	rt_mutex_deadlock_account_lock(lock, task);

	return 1;
}
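/*
 * Note (summary, not from the original comments): try_to_take_rt_mutex()
 * is called from all slow-path acquisition sites in this file --
 * __rt_mutex_slowlock(), rt_mutex_slowlock(), rt_mutex_slowtrylock()
 * and rt_mutex_start_proxy_lock() -- always with ->wait_lock held.
 */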
/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held.
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   int detect_deadlock)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	unsigned long flags;
	int chain_walk = 0, res;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	__rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	plist_node_init(&waiter->list_entry, task->prio);
	plist_node_init(&waiter->pi_list_entry, task->prio);

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	plist_add(&waiter->list_entry, &lock->wait_list);

	task->pi_blocked_on = waiter;

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	if (!owner)
		return 0;

	if (waiter == rt_mutex_top_waiter(lock)) {
		raw_spin_lock_irqsave(&owner->pi_lock, flags);
		plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
		plist_add(&waiter->pi_list_entry, &owner->pi_waiters);

		__rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}
	else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
		chain_walk = 1;

	if (!chain_walk)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
					 task);

	raw_spin_lock(&lock->wait_lock);

	return res;
}
/*
 * Wake up the next waiter on the lock.
 *
 * Remove the top waiter from the current task's pi waiter list and
 * wake it up.
 *
 * Called with lock->wait_lock held.
 */
static void wakeup_next_waiter(struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&current->pi_lock, flags);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters. We do not adjust a
	 * possible priority boost right now. We execute the wakeup in
	 * the boosted mode and go back to normal after releasing
	 * lock->wait_lock.
	 */
	plist_del(&waiter->pi_list_entry, &current->pi_waiters);

	rt_mutex_set_owner(lock, NULL);

	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	wake_up_process(waiter->task);
}
/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and current must have
 * just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	int first = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	unsigned long flags;
	int chain_walk = 0;

	raw_spin_lock_irqsave(&current->pi_lock, flags);
	plist_del(&waiter->list_entry, &lock->wait_list);
	current->pi_blocked_on = NULL;
	raw_spin_unlock_irqrestore(&current->pi_lock, flags);

	if (!owner)
		return;

	if (first) {

		raw_spin_lock_irqsave(&owner->pi_lock, flags);

		plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

		if (rt_mutex_has_waiters(lock)) {
			struct rt_mutex_waiter *next;

			next = rt_mutex_top_waiter(lock);
			plist_add(&next->pi_list_entry, &owner->pi_waiters);
		}
		__rt_mutex_adjust_prio(owner);

		if (owner->pi_blocked_on)
			chain_walk = 1;

		raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
	}

	WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

	if (!chain_walk)
		return;

	/* Gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);

	raw_spin_lock(&lock->wait_lock);
}
/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || waiter->list_entry.prio == task->prio) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}

	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* Gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);
	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
}
/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:	 the rt_mutex to take
 * @state:	 the state the task should block in (TASK_INTERRUPTIBLE
 *		 or TASK_UNINTERRUPTIBLE)
 * @timeout:	 the pre-initialized and started timer, or NULL for none
 * @waiter:	 the pre-initialized rt_mutex_waiter
 *
 * lock->wait_lock must be held by the caller.
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (unlikely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule_rt_mutex(lock);

		raw_spin_lock(&lock->wait_lock);
		set_current_state(state);
	}

	return ret;
}
/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  int detect_deadlock)
{
	struct rt_mutex_waiter waiter;
	int ret = 0;

	debug_rt_mutex_init_waiter(&waiter);

	raw_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout)) {
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
		if (!hrtimer_active(&timeout->timer))
			timeout->task = NULL;
	}

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);

	if (likely(!ret))
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, &waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}
/*
 * Slow path try-lock function:
 */
static inline int
rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	int ret = 0;

	raw_spin_lock(&lock->wait_lock);

	if (likely(rt_mutex_owner(lock) != current)) {

		ret = try_to_take_rt_mutex(lock, current, NULL);
		/*
		 * try_to_take_rt_mutex() sets the lock waiters
		 * bit unconditionally. Clean this up.
		 */
		fixup_rt_mutex_waiters(lock);
	}

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}
/*
 * Slow path to release a rt-mutex:
 */
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	rt_mutex_deadlock_account_unlock(current);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	wakeup_next_waiter(lock);

	raw_spin_unlock(&lock->wait_lock);

	/* Undo pi boosting if necessary: */
	rt_mutex_adjust_prio(current);
}
/*
 * debug aware fast / slowpath lock, trylock and unlock
 *
 * The atomic acquire/release ops are compiled away when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int detect_deadlock,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, NULL, detect_deadlock);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout, int detect_deadlock,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      int detect_deadlock))
{
	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 0;
	} else
		return slowfn(lock, state, timeout, detect_deadlock);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
		rt_mutex_deadlock_account_lock(lock, current);
		return 1;
	}
	return slowfn(lock);
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    void (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
		rt_mutex_deadlock_account_unlock(current);
	else
		slowfn(lock);
}
/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
					int detect_deadlock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
				 detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible, with a timeout
 *			 structure provided by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 * @detect_deadlock:	deadlock detection on/off
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 * -EDEADLK	when the lock would deadlock (when deadlock detection is on)
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
		    int detect_deadlock)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       detect_deadlock, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
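/*
 * Typical usage (illustrative sketch, not part of the original file):
 *
 *	static DEFINE_RT_MUTEX(my_lock);
 *
 *	rt_mutex_lock(&my_lock);
 *	... critical section; if a higher-priority task blocks on
 *	... my_lock, the current owner inherits its priority ...
 *	rt_mutex_unlock(&my_lock);
 *
 * DEFINE_RT_MUTEX() and rt_mutex_init() are provided by
 * <linux/rtmutex.h>.
 */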
/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}
EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	plist_head_init_raw(&lock->wait_list, &lock->wait_lock);

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf
 *				of a proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner: the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
	rt_mutex_deadlock_account_lock(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be unlocked
 *
 * No locking. Caller has to do serializing itself
 * Special API call for PI-futex support
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
	rt_mutex_deadlock_account_unlock(proxy_owner);
}
/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task, int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	if (try_to_take_rt_mutex(lock, task, NULL)) {
		raw_spin_unlock(&lock->wait_lock);
		return 1;
	}

	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain. Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	raw_spin_unlock(&lock->wait_lock);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}
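/*
 * Note on the proxy API (summary, not from the original comments):
 * the requeue-PI path in the futex code first calls
 * rt_mutex_start_proxy_lock() to enqueue a sleeping task as a waiter,
 * and the woken task later completes the acquisition itself with
 * rt_mutex_finish_proxy_lock() below. rt_mutex_next_owner() lets the
 * caller find out which task is about to take the lock.
 */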
/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock to query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}
/**
 * rt_mutex_finish_proxy_lock() - Complete lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, NULL if none. The hrtimer should
 *			already have been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @detect_deadlock:	perform deadlock detection (1) or not (0)
 *
 * Complete the lock acquisition started on our behalf by another thread.
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT or -EDEADLK
 *
 * Special API call for PI-futex requeue support
 */
int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter,
			       int detect_deadlock)
{
	int ret;

	raw_spin_lock(&lock->wait_lock);

	set_current_state(TASK_INTERRUPTIBLE);

	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);

	set_current_state(TASK_RUNNING);

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
	 * We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock(&lock->wait_lock);

	return ret;
}